Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVWriter
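
FileSKVWriter is the interface the tablet server uses to write sorted key/value files; the snippets below come from a major compaction, a minor compaction, and a metadata table rewrite. As a quick orientation, here is a minimal sketch of the write pattern they all share. The output path is a made-up placeholder, and conf/fs are assumed to be a Hadoop Configuration and FileSystem already in scope; the openWriter, startDefaultLocalityGroup, append, and close calls are the same ones used in the snippets.

  // Minimal sketch of the common FileSKVWriter write pattern (not taken from
  // the snippets themselves). The output path is a hypothetical placeholder;
  // conf and fs are assumed to be set up elsewhere.
  AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();
  FileSKVWriter writer = FileOperations.getInstance().openWriter("/tmp/example/00000_00000.rf", fs, conf, acuConf);
  try {
    writer.startDefaultLocalityGroup();   // a locality group must be started before append()
    writer.append(new Key(new Text("row1"), new Text("cf"), new Text("cq")), new Value("v1".getBytes()));
    // ... further keys must be appended in sorted order ...
  } finally {
    writer.close();                       // finalizes the file; a failed close means the file is unusable
  }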


  private MajorCompactionStats compactFiles(Configuration conf, FileSystem fs, Set<String> filesToCompact, boolean propogateDeletes, KeyExtent extent,
      String compactTmpName) throws IOException, MajorCompactionCanceledException {
   
    FileSKVWriter mfw = null;
   
    MajorCompactionStats majCStats = new MajorCompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
          .toString());
      mfw = fileFactory.openWriter(compactTmpName, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, conf, fs, filesToCompact, propogateDeletes, extent, compactTmpName, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, conf, fs, filesToCompact, propogateDeletes, extent, compactTmpName, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(compactTmpName, false, fs, conf, tableConf);
        openReader.close();
        // ... remainder of method elided in the original listing ...
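
The loop over lGroups above hands each configured locality group to compactLocalityGroup (not shown here), and only does so when the writer reports supportsLocalityGroups(); everything else is written afterwards to the default group. As a rough illustration of what driving a locality-group-aware writer looks like at the FileSKVWriter level, assuming the startNewLocalityGroup(name, columnFamilies) counterpart of startDefaultLocalityGroup (this is a sketch, not the elided compactLocalityGroup method):

  // Illustration only: each configured group is started by name with its
  // column family set before its data is appended, and the default group
  // is started last for all remaining column families.
  for (Entry<String,Set<ByteSequence>> group : lGroups.entrySet()) {
    mfw.startNewLocalityGroup(group.getKey(), group.getValue());
    // ... append, in sorted order, only keys whose column family is in group.getValue() ...
  }
  mfw.startDefaultLocalityGroup();
  // ... append the keys whose column family is not in any configured group ...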


    double growthFactor = 4;
    int maxSleepTime = 1000 * Constants.DEFAULT_MINOR_COMPACTION_MAX_SLEEP_TIME;
    boolean reportedProblem = false;
   
    do {
      FileSKVWriter mfw = null;
      try {
        long t1 = System.currentTimeMillis();
       
        AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
            .toString());
       
        FileOperations fileOps = FileOperations.getInstance();
        mfw = fileOps.openWriter(dirname, fs, conf, tableConf);
       
        DeletingIterator sourceItr = new DeletingIterator(new ColumnFamilySkippingIterator(map.skvIterator()), true);
       
        TabletIteratorEnvironment iterEnv = new TabletIteratorEnvironment(IteratorScope.minc, tableConf);
        SortedKeyValueIterator<Key,Value> fai = IteratorUtil.loadIterators(IteratorScope.minc, sourceItr, extent, tableConf, iterEnv);
        fai.seek(new Range(), LocalityGroupUtil.EMPTY_CF_SET, false);
       
        long entriesCompacted = 0;
       
        Map<String,Set<ByteSequence>> groups = LocalityGroupUtil.getLocalityGroups(tableConf);
        if (groups.size() > 0 && mfw.supportsLocalityGroups()) {
          entriesCompacted = partitionData(fai, groups, mfw);
        } else {
         
          // no locality groups or locality groups not supported,
          // so just write everything to default
          mfw.startDefaultLocalityGroup();
          while (fai.hasTop()) {
            mfw.append(fai.getTopKey(), fai.getTopValue());
            fai.next();
            entriesCompacted++;
          }
        }
       
        mfw.close();
       
        // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
        try {
          FileSKVIterator openReader = fileOps.openReader(dirname, false, fs, conf, tableConf);
          openReader.close();
        } catch (IOException ex) {
          log.error("Verification of successful file write fails!!!", ex);
          throw ex;
        }
       
        long t2 = System.currentTimeMillis();
        log.debug(String.format("MinC %,d recs in | %,d recs out | %,d recs/sec | %6.3f secs | %,d bytes ", map.size(), entriesCompacted,
            (int) (map.size() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0, estimatedSizeInBytes()));
       
        if (reportedProblem) {
          ProblemReports.getInstance().deleteProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname);
        }
       
        return new DataFileValue(fileOps.getFileSize(dirname, fs, conf,
            AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId().toString())), entriesCompacted);
      } catch (IOException e) {
        log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...");
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
        reportedProblem = true;
      } catch (LocalityGroupConfigurationError e) {
        log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...");
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
        reportedProblem = true;
      } catch (RuntimeException e) {
        // if this is coming from a user iterator, it is possible that the user could change the iterator config and that the
        // minor compaction would succeed
        log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...", e);
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
        reportedProblem = true;
      } finally {
        try {
          if (mfw != null)
            mfw.close();
        } catch (IOException e1) {
          log.error(e1, e1);
        }
      }
     
      // ... remainder of the do/while loop elided in the original listing ...
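
The do/while that opens this snippet (growthFactor, maxSleepTime, reportedProblem) is truncated here, so the retry tail of the loop is not shown. Purely as an illustration of how growthFactor and maxSleepTime could drive such a retry, a hypothetical loop tail might look like the following; sleepTime, its initial value, and the growth formula are assumptions, not the original code:

  // Hypothetical backoff tail for the truncated do/while loop above;
  // sleepTime and the growth formula are assumptions for illustration only.
  int sleepTime = 100;                      // assumed starting sleep, in milliseconds
  // ... loop body as shown above; on success it returns a DataFileValue ...
  try {
    Thread.sleep(sleepTime);
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
  }
  sleepTime = Math.min((int) (sleepTime * growthFactor), maxSleepTime);
  // the loop then retries the minor compaction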

    VersioningIterator rootIter = new VersioningIterator(delIter, 1);
   
    TreeMap<Key,Value> files = new TreeMap<Key,Value>();
   
    fs.mkdirs(new Path(Constants.getMetadataTableDir() + "_new" + "/default_tablet"));
    FileSKVWriter defaultTabletFile = FileOperations.getInstance().openWriter(Constants.getMetadataTableDir() + "_new" + "/default_tablet/00000_00000.rf", fs,
        conf, AccumuloConfiguration.getDefaultConfiguration());
    defaultTabletFile.startDefaultLocalityGroup();
   
    while (rootIter.hasTop()) {
      Key key = rootIter.getTopKey();
      Value val = rootIter.getTopValue();
     
      // System.out.println("ROOT "+key+" "+val);
     
      if (key.getRow().toString().equals("!METADATA;!METADATA<")) {
        rootIter.next();
        continue;
      }
     
      if (files.size() > 0 && !files.lastKey().getRow().equals(key.getRow())) {
        processTabletFiles(conf, fs, maxTime, tableIds, files, defaultTabletFile);
        files.clear();
      }
     
      files.put(new Key(key), new Value(val));
      rootIter.next();
    }
   
    processTabletFiles(conf, fs, maxTime, tableIds, files, defaultTabletFile);
   
    defaultTabletFile.close();
   
    fs.mkdirs(new Path(Constants.getMetadataTableDir() + "_new" + "/root_tablet"));
    FileSKVWriter rootTabletFile = FileOperations.getInstance().openWriter(Constants.getMetadataTableDir() + "_new" + "/root_tablet/00000_00000.rf", fs, conf,
        AccumuloConfiguration.getDefaultConfiguration());
    rootTabletFile.startDefaultLocalityGroup();
   
    Text rootExtent = Constants.ROOT_TABLET_EXTENT.getMetadataEntry();
   
    // root's directory
    Key rootDirKey = new Key(rootExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
    rootTabletFile.append(rootDirKey, new Value("/root_tablet".getBytes()));
   
    // root's prev row
    Key rootPrevRowKey = new Key(rootExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
    rootTabletFile.append(rootPrevRowKey, new Value(new byte[] {0}));
   
    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));
   
    // default's file
    Key defaultFileKey = new Key(defaultExtent, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/default_tablet/00000_00000.rf"), 0);
    rootTabletFile.append(defaultFileKey, new Value(new DataFileValue(0, 0).encode()));
   
    // default's directory
    Key defaultDirKey = new Key(defaultExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(),
        0);
    rootTabletFile.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
   
    // default's time
    Key defaultTimeKey = new Key(defaultExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
    rootTabletFile.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "" + maxTime).getBytes()));
   
    // default's prevrow
    Key defaultPrevRowKey = new Key(defaultExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(),
        Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
    rootTabletFile.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null))));
   
    rootTabletFile.close();
   
    fs.rename(new Path(Constants.getMetadataTableDir()), new Path(Constants.getMetadataTableDir() + "_old"));
    fs.rename(new Path(Constants.getMetadataTableDir() + "_new"), new Path(Constants.getMetadataTableDir()));
  }
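Files written with FileSKVWriter are read back through the companion FileSKVIterator interface, seen above in the verify-after-close steps. A minimal sketch of scanning one of them, assuming the same conf/fs are in scope and using a hypothetical path; passing true for seekToBeginning positions the iterator at the first key:

  // Hedged sketch: read back a file written with FileSKVWriter. The path is
  // a placeholder; hasTop/getTopKey/getTopValue/next come from the
  // SortedKeyValueIterator contract that FileSKVIterator extends.
  FileSKVIterator reader = FileOperations.getInstance().openReader(
      "/tmp/example/00000_00000.rf", true, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
  try {
    while (reader.hasTop()) {
      System.out.println(reader.getTopKey() + " -> " + reader.getTopValue());
      reader.next();
    }
  } finally {
    reader.close();
  }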
