Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVIterator

FileSKVIterator is the sorted key/value iterator that Accumulo's file readers implement. Instances come from FileOperations.getInstance().openReader(...) and drive the usual hasTop()/getTopKey()/getTopValue()/next() cycle, plus file-level extras such as getFirstKey() and getLastKey(). The snippets below are fragments from real call sites; several are cut off where the original listing truncated them.

A bulk-import loop that streams every entry out of each input file and re-applies it as a Mutation, optionally overriding timestamps:

    /*
     * Begin the import - iterate the files in the path
     */
    for (FileStatus importStatus : fs.listStatus(importPath)) {
      try {
        FileSKVIterator importIterator = FileOperations.getInstance()
            .openReader(importStatus.getPath().toString(), true, fs,
                fs.getConf(), AccumuloConfiguration.getDefaultConfiguration());
        while (importIterator.hasTop()) {
          Key key = importIterator.getTopKey();
          Value value = importIterator.getTopValue();
          if (setTime) {
            key.setTimestamp(time);
          }
          Mutation mutation = new Mutation(key.getRow());
          if (!key.isDeleted()) {
            mutation.put(key.getColumnFamily(), key.getColumnQualifier(),
                new ColumnVisibility(key.getColumnVisibilityData().toArray()),
                key.getTimestamp(), value);
          } else {
            mutation.putDelete(key.getColumnFamily(), key.getColumnQualifier(),
                new ColumnVisibility(key.getColumnVisibilityData().toArray()),
                key.getTimestamp());
          }
          table.addMutation(mutation);
          importIterator.next();
        }
      } catch (Exception e) {
        FSDataOutputStream failureWriter = null;
        DataInputStream failureReader = null;
        try {
          // ... (failure-handling code truncated in the original listing)
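
The fragment above does not close importIterator in the portion shown. A minimal self-contained sketch of the same read loop with deterministic cleanup, assuming the same 1.x-era FileOperations API used throughout this page:

  // A sketch, not the original code. Assumed imports: java.io.IOException,
  // org.apache.accumulo.core.conf.AccumuloConfiguration,
  // org.apache.accumulo.core.data.Key, org.apache.accumulo.core.data.Value,
  // org.apache.accumulo.core.file.FileOperations, org.apache.accumulo.core.file.FileSKVIterator,
  // org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem
  static void scanFile(String file) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // seekToBeginning=true positions the reader at the first key in the file
    FileSKVIterator reader = FileOperations.getInstance().openReader(file, true, fs, conf,
        AccumuloConfiguration.getDefaultConfiguration());
    try {
      while (reader.hasTop()) {
        Key key = reader.getTopKey();
        Value value = reader.getTopValue();
        // ... use key and value here ...
        reader.next();
      }
    } finally {
      reader.close(); // release the file handle even if the loop fails
    }
  }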


     
Scanning a tablet's data files for the largest timestamp seen, in order to initialize the tablet's logical clock:

      long rtime = Long.MIN_VALUE;
      for (String path : datafiles.keySet()) {
        String filename = new Path(path).getName();
       
        FileSKVIterator reader = FileOperations.getInstance().openReader(this.location + "/" + filename, true, fs, fs.getConf(),
            tabletServer.getTableConfiguration(extent));
        long maxTime = -1;
        try {
         
          while (reader.hasTop()) {
            maxTime = Math.max(maxTime, reader.getTopKey().getTimestamp());
            reader.next();
          }
         
        } finally {
          reader.close();
        }
       
        if (maxTime > rtime) {
          time = TabletTime.LOGICAL_TIME_ID + "" + maxTime;
          rtime = maxTime;
          // ... (remainder truncated in the original listing)
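
For context on the time string above: TabletTime.LOGICAL_TIME_ID is a one-character type prefix ('L' in the Accumulo sources) marking the value as logical rather than millisecond time, and the + "" idiom forces the char and the long into string concatenation, so the result looks like "L42".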

   
Collecting each data file's first and last keys, giving callers the key span that every file covers:

    Map<String,Pair<Key,Key>> falks = new HashMap<String,Pair<Key,Key>>();
   
    for (Entry<String,DataFileValue> entry : files.entrySet()) {
      String file = entry.getKey();
      FileSKVIterator openReader = fileFactory.openReader(file, true, fs, conf, acuTableConf);
      try {
        Key first = openReader.getFirstKey();
        Key last = openReader.getLastKey();
        falks.put(file, new Pair<Key,Key>(first, last));
      } finally {
        openReader.close();
      }
    }
    return falks;
  }
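
One use for those first/last keys is cheap pruning. A follow-up sketch, not from the original source: queryRange is a stand-in, and the sketch assumes getFirstKey()/getLastKey() return null for an empty file (verify against your Accumulo version):

    // Hypothetical pruning pass over the map built above. Assumed extra imports:
    // java.util.Map.Entry, org.apache.accumulo.core.data.Range
    Range queryRange = new Range("a", "m"); // stand-in query range (rows "a" through "m")
    for (Entry<String,Pair<Key,Key>> e : falks.entrySet()) {
      Key first = e.getValue().getFirst();
      Key last = e.getValue().getSecond();
      if (first == null || last == null)
        continue; // empty file, nothing to read
      // clip(..., true) returns null when the file's key span and the query are disjoint
      if (queryRange.clip(new Range(first, true, last, true), true) != null)
        System.out.println(e.getKey() + " may contain data in the range");
    }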


   
Opening a set of files and merging them into a single sorted view with MultiIterator:

    readers.clear();
   
    // TODO need to close files - ACCUMULO-1303
    for (String file : absFiles) {
      FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, conf, acuTableConf, null, null);
      readers.add(reader);
    }
   
    MultiIterator multiIter = new MultiIterator(readers, extent);
   
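
MultiIterator presents the readers as one merged, sorted stream, but like any SortedKeyValueIterator it yields nothing until it has been seeked. A hypothetical continuation:

    // Hypothetical continuation, not in the original fragment. The empty
    // column-family set with inclusive=false means no column-family filtering.
    // Assumed imports: java.util.Collections,
    // org.apache.accumulo.core.data.ByteSequence, org.apache.accumulo.core.data.Range
    multiIter.seek(new Range(), Collections.<ByteSequence>emptySet(), false);
    while (multiIter.hasTop()) {
      // ... use multiIter.getTopKey() / multiIter.getTopValue() ...
      multiIter.next();
    }

Note the TODO in the fragment: the opened readers are never closed here, which is exactly what ACCUMULO-1303 tracks.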

Two FileOperations overloads that open a reader and immediately seek it to the requested range; the second variant also threads data and index block caches through to the underlying reader:

  @Override
  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
      AccumuloConfiguration tableConf) throws IOException {
    FileSKVIterator iter = openReader(file, false, fs, conf, tableConf, null, null);
    iter.seek(range, columnFamilies, inclusive);
    return iter;
  }
 
  @Override
  public FileSKVIterator openReader(String file, Range range, Set<ByteSequence> columnFamilies, boolean inclusive, FileSystem fs, Configuration conf,
      AccumuloConfiguration tableConf, BlockCache dataCache, BlockCache indexCache) throws IOException {
    FileSKVIterator iter = openReader(file, false, fs, conf, tableConf, dataCache, indexCache);
    iter.seek(range, columnFamilies, inclusive);
    return iter;
  }
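
A hypothetical call site for these overloads, with file, fs, conf, and tableConf standing in for values like those in the other snippets:

    // Hypothetical usage, not from the original source. Assumed imports:
    // java.util.Collections, org.apache.accumulo.core.data.ByteSequence,
    // org.apache.accumulo.core.data.Range
    Range range = new Range("row1", "row2"); // stand-in row range
    FileSKVIterator iter = FileOperations.getInstance().openReader(file, range,
        Collections.<ByteSequence>emptySet(), false, fs, conf, tableConf);
    try {
      while (iter.hasTop()) {
        // ... only entries within range are returned ...
        iter.next();
      }
    } finally {
      iter.close();
    }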

After a compaction finishes, the output file is re-opened to verify that close() really succeeded (old Hadoop releases could report success falsely):

      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, acuTableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
        throw ex;
      }
     

Opening every input file for a compaction: each reader is wrapped in a ProblemReportingIterator (plus a TimeSettingIterator when a bulk-import time must be applied), and any already-open readers are closed if one of the opens fails:

    for (String mapFile : filesToCompact.keySet()) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, acuTableConf);
       
        readers.add(reader);
       
        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader);
       
        if (filesToCompact.get(mapFile).isTimeSet()) {
          iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
        }
       
        iters.add(iter);
       
      } catch (Throwable e) {
       
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_READ, mapFile, e));
       
        log.warn("Some problem opening map file " + mapFile + " " + e.getMessage(), e);
        // failed to open some map file... close the ones that were opened
        for (FileSKVIterator reader : readers) {
          try {
            reader.close();
          } catch (Throwable e2) {
            log.warn("Failed to close map file", e2);
          }
        }
       
        // ... (remainder truncated in the original listing)

   
Reserving readers for a scan session, opening each file with shared data and index block caches:

    // open any files that need to be opened
    for (String file : filesToOpen) {
      try {
        // log.debug("Opening "+file);
        FileSKVIterator reader = FileOperations.getInstance().openReader(file, false, fs, fs.getConf(), conf.getTableConfiguration(table.toString()),
            dataCache, indexCache);
        reservedFiles.add(reader);
        readersReserved.put(reader, file);
      } catch (Exception e) {
       
        // ... (remainder truncated in the original listing)
