Package org.apache.accumulo.core.util.MetadataTable

Examples of org.apache.accumulo.core.util.MetadataTable.DataFileValue
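DataFileValue pairs a data file's size in bytes with its number of entries. A tablet stores one encoded DataFileValue per file, keyed by the file path in the column qualifier, under the data file column family of its row in the metadata table. A minimal round-trip sketch, using only the constructors and accessors that appear in the snippets below:

import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.util.MetadataTable.DataFileValue;

public class DataFileValueRoundTrip {
  public static void main(String[] args) {
    DataFileValue dfv = new DataFileValue(1024, 100);   // size in bytes, number of entries
    Value v = new Value(dfv.encode());                  // encoded form, as stored in the metadata table

    DataFileValue decoded = new DataFileValue(v.get()); // parse the stored bytes back
    System.out.println(decoded.getSize() + " bytes, " + decoded.getNumEntries() + " entries");
  }
}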


      }
      log.debug("fs " + fs + " files: " + Arrays.toString(paths) + " location: " + location);
      Collection<String> goodPaths = cleanUpFiles(fs, files, location, true);
      for (String path : goodPaths) {
        String filename = new Path(path).getName();
        DataFileValue dfv = new DataFileValue(0, 0);
        datafiles.put(locText.toString() + "/" + filename, dfv);
      }
    } else {
     
      SortedMap<Key,Value> datafilesMetadata;
     
      Text rowName = extent.getMetadataEntry();
     
      if (tabletsKeyValues != null && tabletsKeyValues.size() > 0) {
        datafilesMetadata = new TreeMap<Key,Value>();
        for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
          if (entry.getKey().compareRow(rowName) == 0 && entry.getKey().compareColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY) == 0) {
            datafilesMetadata.put(new Key(entry.getKey()), new Value(entry.getValue()));
          }
        }
      } else {
       
        ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.systemCredentials, Constants.METADATA_TABLE_ID,
            Constants.NO_AUTHS);
       
        // Historical note: the batch size was raised from 10 to 1000 after end
        // keys were implemented; when a tablet has no data files this scan
        // returns nothing, and a small batch size only added round trips.
        mdScanner.setBatchSize(1000);
       
        // fetch only the data file column family; the range's end key bounds the scan
        mdScanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);
       
        mdScanner.setRange(new Range(rowName));
       
        datafilesMetadata = new TreeMap<Key,Value>();
       
        for (Entry<Key,Value> entry : mdScanner) {
         
          if (entry.getKey().compareRow(rowName) != 0) {
            break;
          }
         
          datafilesMetadata.put(new Key(entry.getKey()), new Value(entry.getValue()));
        }
      }
     
      for (Entry<Key,Value> entry : datafilesMetadata.entrySet()) {
        datafiles.put(entry.getKey().getColumnQualifier().toString(), new DataFileValue(entry.getValue().get()));
      }
    }
   
    return datafiles;
  }
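The method above builds the datafiles map either from pre-fetched key/value pairs or by scanning the metadata table through the internal ScannerImpl. A sketch of the same read using the public Scanner API; the connector and extent variables are assumed to be obtained elsewhere:

Scanner scanner = connector.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS);
scanner.setRange(new Range(extent.getMetadataEntry()));
scanner.fetchColumnFamily(Constants.METADATA_DATAFILE_COLUMN_FAMILY);

SortedMap<String,DataFileValue> datafiles = new TreeMap<String,DataFileValue>();
for (Entry<Key,Value> entry : scanner) {
  // the column qualifier is the file path; the value is the encoded DataFileValue
  datafiles.put(entry.getKey().getColumnQualifier().toString(), new DataFileValue(entry.getValue().get()));
}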


    long count = 0;
   
    try {
      Span span = Trace.start("write");
      count = memTable.getNumEntries();
      DataFileValue stats = memTable.minorCompact(conf, fs, tmpDatafile, extent);
      span.stop();
      span = Trace.start("bringOnline");
      datafileManager.bringMinorCompactionOnline(tmpDatafile, newDatafile, stats, commitSession);
      span.stop();
      return stats;

        majCStats.add(mcs);
       
        long size = FileOperations.getInstance().getFileSize(compactTmpName, fs, conf,
            AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId().toString()));
       
        datafileManager.bringMajorCompactionOnline(smallestFiles, compactTmpName, fileName, new DataFileValue(size, mcs.entriesWritten));
       
        // when major compaction produces a file with zero entries, that file is
        // deleted, so do not add it to the map of files to compact
        if (filesToCompact.size() > 0 && mcs.entriesWritten > 0) {
          filesToCompact.put(fileName, size);

  public void importMapFiles(Map<String,MapFileInfo> fileMap) throws IOException {
    Map<String,DataFileValue> entries = new HashMap<String,DataFileValue>(fileMap.size());
   
    for (Entry<String,MapFileInfo> entry : fileMap.entrySet()) {
      MapFileInfo mfi = entry.getValue();
      entries.put(entry.getKey(), new DataFileValue(mfi.estimatedSize, 0L));
    }
   
    // If we spent too long waiting for the lock, skip the import: clients
    // will have timed out and will assume the operation failed.
    long now = System.currentTimeMillis();
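A hypothetical caller sketch for importMapFiles; the path and size are made up, and the MapFileInfo(long) constructor is assumed from the thrift-generated class:

Map<String,MapFileInfo> fileMap = new HashMap<String,MapFileInfo>();
fileMap.put("/b-0000001/I0000001.rf", new MapFileInfo(1024L)); // hypothetical bulk-import file and size
tablet.importMapFiles(fileMap);                                // 'tablet' is assumed to be in scope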

      }
     
      MetadataTable.updateTabletDataFile(extent, relSizes, SecurityConstants.systemCredentials, tabletServer.getLock());
     
      if (fs.globStatus(new Path(bulkDir + "/processing_proc_*")).length == 0) {
        DataFileValue zero = new DataFileValue(0, 0);
        MetadataTable.replaceDatafiles(extent, relSizes.keySet(), new HashSet<String>(), "junk", zero, SecurityConstants.systemCredentials,
            tabletServer.getClientAddressString(), lastLocation, tabletServer.getLock(), false);
        throw new IOException("Processing file does not exist, aborting bulk import " + extent + " " + bulkDir);
      }
     
      synchronized (Tablet.this) {
        for (Entry<String,DataFileValue> tpath : paths.entrySet()) {
          String path = relPaths.get(tpath.getKey());
          DataFileValue estSize = tpath.getValue();
         
          if (datafileSizes.containsKey(path)) {
            log.error("Adding file that is already in set " + path);
          }
          datafileSizes.put(path, estSize);

       
        if (reportedProblem) {
          ProblemReports.getInstance().deleteProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname);
        }
       
        return new DataFileValue(fileOps.getFileSize(dirname, fs, conf,
            AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId().toString())), entriesCompacted);
      } catch (IOException e) {
        log.warn("MinC failed (" + e.getMessage() + ") to create " + dirname + " retrying ...");
        ProblemReports.getInstance().report(new ProblemReport(extent.getTableId().toString(), ProblemType.FILE_WRITE, dirname, e));
        reportedProblem = true;

   
    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));
   
    // default's file
    Key defaultFileKey = new Key(defaultExtent, Constants.METADATA_DATAFILE_COLUMN_FAMILY, new Text("/default_tablet/00000_00000.rf"), 0);
    rootTabletFile.append(defaultFileKey, new Value(new DataFileValue(0, 0).encode()));
   
    // default's directory
    Key defaultDirKey = new Key(defaultExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(),
        0);
    rootTabletFile.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));

        violations.add((short) 6);
      }
     
      if (columnFamily.equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
        try {
          DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
         
          if (dfv.getSize() < 0 || dfv.getNumEntries() < 0) {
            if (violations == null)
              violations = new ArrayList<Short>();
            violations.add((short) 1);
          }
        } catch (NumberFormatException nfe) {
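The catch clause above is cut off in the excerpt. Since DataFileValue's byte[] constructor parses the encoded value, a malformed update fails with NumberFormatException before the size check runs; a small sketch of that path, with a made-up payload:

try {
  new DataFileValue("abc,def".getBytes()); // bytes that do not parse as longs
} catch (NumberFormatException nfe) {
  // the constraint above would record a violation for such an update
}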


   
    try {
      Span span = Trace.start("write");
      count = memTable.getNumEntries();
     
      DataFileValue dfv = null;
      if (mergeFile != null)
        dfv = datafileManager.getDatafileSizes().get(mergeFile);
     
      MinorCompactor compactor = new MinorCompactor(conf, fs, memTable, mergeFile, dfv, tmpDatafile, acuTableConf, extent);
      CompactionStats stats = compactor.call();
     
      span.stop();
      span = Trace.start("bringOnline");
      DataFileValue dfvStats = new DataFileValue(stats.getFileSize(), stats.getEntriesWritten());
      datafileManager.bringMinorCompactionOnline(tmpDatafile, newDatafile, mergeFile, dfvStats, commitSession, flushId);
      span.stop();
      return dfvStats;
    } catch (RuntimeException e) {
      failed = true;
      throw e;
    } catch (Error e) {
      // Weird errors like "OutOfMemoryError" when trying to create the thread for the compaction