Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileOperations in use. The excerpts below come from Accumulo's tablet server code, where FileOperations opens the readers and writers used during minor and major compactions.
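
Before the excerpts, a minimal sketch of the basic round trip: get the FileOperations instance, open a writer, append key/value pairs, close, then open a reader over the same file. Like the excerpts, it omits imports and setup; fs, conf, tableConf and the file path are placeholders, and the calls shown (openWriter, startDefaultLocalityGroup, append, openReader, seek) are the ones the excerpts themselves use.

    // Minimal sketch, assuming fs (FileSystem), conf (Configuration) and
    // tableConf (AccumuloConfiguration) are already available; the path is a placeholder.
    String file = "/accumulo/tables/1/default_tablet/F0000000.rf";
    FileOperations fileFactory = FileOperations.getInstance();

    FileSKVWriter writer = fileFactory.openWriter(file, fs, conf, tableConf);
    writer.startDefaultLocalityGroup(); // no locality groups in this sketch
    writer.append(new Key(new Text("row"), new Text("cf"), new Text("cq")), new Value("v".getBytes()));
    writer.close();

    FileSKVIterator reader = fileFactory.openReader(file, false, fs, conf, tableConf);
    reader.seek(new Range(), LocalityGroupUtil.EMPTY_CF_SET, false);
    while (reader.hasTop()) {
      System.out.println(reader.getTopKey() + " -> " + reader.getTopValue());
      reader.next();
    }
    reader.close();

The excerpts that follow show the same calls in their real context: major compactions, minor compactions, and reading the first and last keys of candidate files.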


    FileSKVWriter mfw = null;
   
    CompactionStats majCStats = new CompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
      mfw = fileFactory.openWriter(outputFile, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, tableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
        throw ex;
      }
     
      log.debug(String.format("Compaction %s %,d read | %,d written | %,6d entries/sec | %6.3f secs", extent, majCStats.getEntriesRead(),
          majCStats.getEntriesWritten(), (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0));
     
      majCStats.setFileSize(fileFactory.getFileSize(outputFile, fs, conf, ServerConfiguration.getTableConfiguration(extent.getTableId().toString())));
      return majCStats;
    } catch (IOException e) {
      log.error(e, e);
      throw e;
    } catch (RuntimeException e) {
      // ... (excerpt truncated)
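Two details of the excerpt above are worth calling out: mfw is set to null before close() so the finally block does not try to close the writer a second time, and the finished file is re-opened with openReader purely to confirm that close() really succeeded. A condensed sketch of that close-and-verify idiom; the helper name closeAndVerify is made up, and the openReader call mirrors the one above:

    // Hypothetical helper capturing the close-and-verify idiom used above.
    static void closeAndVerify(FileSKVWriter writer, String file, FileSystem fs,
        Configuration conf, AccumuloConfiguration tableConf) throws IOException {
      writer.close(); // a failed close must fail the whole compaction
      // hadoop 0.20.2 could report success from close() even when the file was bad,
      // so open the file again to make sure it is actually readable
      FileSKVIterator check = FileOperations.getInstance().openReader(file, false, fs, conf, tableConf);
      check.close();
    }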


    List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(filesToCompact.size());
   
    for (String mapFile : filesToCompact.keySet()) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, tableConf);
       
        readers.add(reader);
       
        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader);
       
        // ... (excerpt truncated)
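This excerpt opens one FileSKVIterator per input file and wraps each reader in a ProblemReportingIterator before the merge. The readers list exists so the underlying files can be closed once the merge finishes or fails; a minimal cleanup sketch, assuming readers is a List<FileSKVIterator>:

    // Close every reader that was opened; swallow individual failures so one bad
    // close does not leak the remaining files. (Sketch only; real code might log
    // or report these exceptions instead.)
    for (FileSKVIterator r : readers) {
      try {
        r.close();
      } catch (IOException ioe) {
        // ignore and keep closing the rest
      }
    }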

      return null;
    return new CompactionTuple(toCompact, toCompact.size() == files.size());
  }
 
  private Map<String,Pair<Key,Key>> getFirstAndLastKeys(SortedMap<String,DataFileValue> files) throws IOException {
    FileOperations fileFactory = FileOperations.getInstance();
   
    Map<String,Pair<Key,Key>> falks = new HashMap<String,Pair<Key,Key>>();
   
    for (Entry<String,DataFileValue> entry : files.entrySet()) {
      String file = entry.getKey();
      FileSKVIterator openReader = fileFactory.openReader(file, true, fs, conf, acuTableConf);
      try {
        Key first = openReader.getFirstKey();
        Key last = openReader.getLastKey();
        falks.put(file, new Pair<Key,Key>(first, last));
      } finally {
        // ... (excerpt truncated)
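The excerpt is cut off inside the finally block, whose job in this pattern is to close the reader whether or not the keys were read successfully. A self-contained sketch of the per-file logic (variable names mirror the excerpt; the boolean argument to openReader requests a reader positioned at the start of the file):

    // Sketch: read the first and last key of one file, always closing the reader.
    FileSKVIterator openReader = fileFactory.openReader(file, true, fs, conf, acuTableConf);
    try {
      Key first = openReader.getFirstKey();
      Key last = openReader.getLastKey();
      falks.put(file, new Pair<Key,Key>(first, last));
    } finally {
      openReader.close(); // release the file even if reading the keys failed
    }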

    List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(mapFiles.size());
   
    for (String mapFile : mapFiles) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
            .toString());
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, tableConf);
       
        readers.add(reader);
        iters.add(new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader));
       
      } catch (Throwable e) {
        // ... (excerpt truncated)

    FileSKVWriter mfw = null;
   
    MajorCompactionStats majCStats = new MajorCompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
          .toString());
      mfw = fileFactory.openWriter(compactTmpName, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, conf, fs, filesToCompact, propogateDeletes, extent, compactTmpName, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, conf, fs, filesToCompact, propogateDeletes, extent, compactTmpName, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(compactTmpName, false, fs, conf, tableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful major compaction fails!!!", ex);
        throw ex;
      }
      // ... (excerpt truncated)
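Both compaction excerpts guard the per-group writes with mfw.supportsLocalityGroups() and finish with a default group for everything else. A condensed sketch of that write order; it assumes FileSKVWriter exposes startNewLocalityGroup(name, columnFamilies), which the excerpts themselves do not show:

    // Sketch: write each configured locality group, then the remainder in the
    // default group. lGroups maps group name -> column families, as returned by
    // LocalityGroupUtil.getLocalityGroups(tableConf).
    if (mfw.supportsLocalityGroups()) {
      for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
        mfw.startNewLocalityGroup(entry.getKey(), entry.getValue()); // assumed signature
        // ... append only keys whose column family is in entry.getValue() ...
      }
    }
    mfw.startDefaultLocalityGroup();
    // ... append the remaining keys ...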

        long t1 = System.currentTimeMillis();
       
        AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
            .toString());
       
        FileOperations fileOps = FileOperations.getInstance();
        mfw = fileOps.openWriter(dirname, fs, conf, tableConf);
       
        DeletingIterator sourceItr = new DeletingIterator(new ColumnFamilySkippingIterator(map.skvIterator()), true);
       
        TabletIteratorEnvironment iterEnv = new TabletIteratorEnvironment(IteratorScope.minc, tableConf);
        SortedKeyValueIterator<Key,Value> fai = IteratorUtil.loadIterators(IteratorScope.minc, sourceItr, extent, tableConf, iterEnv);
        fai.seek(new Range(), LocalityGroupUtil.EMPTY_CF_SET, false);
       
        long entriesCompacted = 0;
       
        Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(tableConf);
        if (groups.size() > 0 && mfw.supportsLocalityGroups()) {
          entriesCompacted = partitionData(fai, groups, mfw);
        } else {
         
          // no locality groups or locality groups not supported,
          // so just write everything to default
          mfw.startDefaultLocalityGroup();
          while (fai.hasTop()) {
            mfw.append(fai.getTopKey(), fai.getTopValue());
            fai.next();
            entriesCompacted++;
          }
        }
       
        mfw.close();
       
        // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
        try {
          FileSKVIterator openReader = fileOps.openReader(dirname, false, fs, conf, tableConf);
          openReader.close();
        } catch (IOException ex) {
          log.error("Verification of successful file write fails!!!", ex);
          throw ex;
        }
       
        long t2 = System.currentTimeMillis();
        log.debug(String.format("MinC %,d recs in | %,d recs out | %,d recs/sec | %6.3f secs | %,d bytes ", map.size(), entriesCompacted,
            (int) (map.size() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0, estimatedSizeInBytes()));
       
        if (reportedProblem) {
          ProblemReports.getInstance().deleteProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname);
        }
       
        return new DataFileValue(fileOps.getFileSize(dirname, fs, conf,
            AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId().toString())), entriesCompacted);
      } catch (IOException e) {
        log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...");
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
        reportedProblem = true;
        // ... (excerpt truncated)
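The minor compaction excerpt also shows the error handling that surrounds these FileOperations calls: a failed write is recorded as a FILE_WRITE problem report and retried, and a later successful write deletes the report. Reduced to its skeleton (names mirror the excerpt; the enclosing retry loop is assumed):

    // Skeleton of the report-on-failure / clear-on-success pattern around the write.
    try {
      // ... open the writer, append, close, and verify as in the excerpt ...
      if (reportedProblem) {
        // an earlier attempt failed; clear its problem report now that the write worked
        ProblemReports.getInstance().deleteProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname);
      }
    } catch (IOException e) {
      ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
      reportedProblem = true;
      // the caller retries the minor compaction
    }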
