Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileOperations
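
Every excerpt on this page follows the same basic pattern: obtain the factory with FileOperations.getInstance(), open a FileSKVIterator or FileSKVWriter against a map file, and close it in a finally block. Below is a minimal, self-contained sketch of the read side; it assumes an Accumulo 1.4-era classpath, and the file path, configuration objects, and class name (FirstLastKeyExample) are illustrative placeholders rather than values taken from the excerpts.

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class FirstLastKeyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    AccumuloConfiguration tableConf = AccumuloConfiguration.getDefaultConfiguration();

    // hypothetical RFile path; substitute a file actually written by Accumulo
    String file = "/accumulo/tables/1/default_tablet/F0000000.rf";

    FileOperations fileFactory = FileOperations.getInstance();
    // the boolean asks for a reader that is already seeked to the start of the file
    FileSKVIterator openReader = fileFactory.openReader(file, true, fs, conf, tableConf);
    try {
      Key first = openReader.getFirstKey(); // may be null if the file is empty
      Key last = openReader.getLastKey();
      System.out.println(first + " ... " + last);
    } finally {
      openReader.close(); // always release the underlying file handle
    }
  }
}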


      return null;
    return new CompactionTuple(toCompact, toCompact.size() == files.size());
  }
 
  private Map<String,Pair<Key,Key>> getFirstAndLastKeys(SortedMap<String,DataFileValue> files) throws IOException {
    FileOperations fileFactory = FileOperations.getInstance();
   
    Map<String,Pair<Key,Key>> falks = new HashMap<String,Pair<Key,Key>>();
   
    for (Entry<String,DataFileValue> entry : files.entrySet()) {
      String file = entry.getKey();
      FileSKVIterator openReader = fileFactory.openReader(file, true, fs, conf, acuTableConf);
      try {
        Key first = openReader.getFirstKey();
        Key last = openReader.getLastKey();
        falks.put(file, new Pair<Key,Key>(first, last));
      } finally {
        openReader.close();
      }
    }

    return falks;
  }


    String oldThreadName = Thread.currentThread().getName();
    String newThreadName = "MajC compacting " + extent.toString() + " started " + dateFormatter.format(new Date()) + " file: " + outputFile;
    Thread.currentThread().setName(newThreadName);
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      mfw = fileFactory.openWriter(outputFile, fs, conf, acuTableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(acuTableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          setLocalityGroup(entry.getKey());
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      setLocalityGroup("");
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, acuTableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
        throw ex;
      }
     
      log.debug(String.format("Compaction %s %,d read | %,d written | %,6d entries/sec | %6.3f secs", extent, majCStats.getEntriesRead(),
          majCStats.getEntriesWritten(), (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0));
     
      majCStats.setFileSize(fileFactory.getFileSize(outputFile, fs, conf, acuTableConf));
      return majCStats;
    } catch (IOException e) {
      log.error(e, e);
      throw e;
    } catch (RuntimeException e) {
      log.error(e, e);
      throw e;
    }
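
The compaction excerpts open a FileSKVWriter the same way, write one locality group per configured group, close the writer, and then re-open the output purely to verify that the close succeeded. The following is a condensed sketch of that write-then-verify pattern; it again assumes a 1.4-era classpath, and the output path, key, and value are hypothetical placeholders, not part of the original code.

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.file.FileSKVWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class WriteThenVerifyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    AccumuloConfiguration tableConf = AccumuloConfiguration.getDefaultConfiguration();

    // hypothetical output path; the writer implementation is chosen from the file suffix
    String outputFile = "/tmp/example.rf";

    FileOperations fileFactory = FileOperations.getInstance();
    FileSKVWriter writer = fileFactory.openWriter(outputFile, fs, conf, tableConf);
    try {
      // with no locality groups configured, everything goes into the default group
      writer.startDefaultLocalityGroup();
      writer.append(new Key("row", "cf", "cq"), new Value("v".getBytes()));
    } finally {
      writer.close(); // a failed close must fail the whole operation
    }

    // hadoop 0.20.2 sometimes lied about the success of close(), so the compaction
    // code re-opens the new file as a reader just to prove it is readable
    FileSKVIterator verify = fileFactory.openReader(outputFile, false, fs, conf, tableConf);
    verify.close();

    System.out.println("wrote " + fileFactory.getFileSize(outputFile, fs, conf, tableConf) + " bytes");
  }
}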

    List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(filesToCompact.size());
   
    for (String mapFile : filesToCompact.keySet()) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, acuTableConf);
       
        readers.add(reader);
       
        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader);
       

    FileSKVWriter mfw = null;
   
    CompactionStats majCStats = new CompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
      mfw = fileFactory.openWriter(outputFile, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, tableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
        throw ex;
      }
     
      log.debug(String.format("Compaction %s %,d read | %,d written | %,6d entries/sec | %6.3f secs", extent, majCStats.getEntriesRead(),
          majCStats.getEntriesWritten(), (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0));
     
      majCStats.setFileSize(fileFactory.getFileSize(outputFile, fs, conf, ServerConfiguration.getTableConfiguration(extent.getTableId().toString())));
      return majCStats;
    } catch (IOException e) {
      log.error(e, e);
      throw e;
    } catch (RuntimeException e) {
      log.error(e, e);
      throw e;
    }

    List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(filesToCompact.size());
   
    for (String mapFile : filesToCompact.keySet()) {
      try {
       
        FileOperations fileFactory = FileOperations.getInstance();
       
        FileSKVIterator reader;
       
        AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
       
        reader = fileFactory.openReader(mapFile, false, fs, conf, tableConf);
       
        readers.add(reader);
       
        SortedKeyValueIterator<Key,Value> iter = new ProblemReportingIterator(extent.getTableId().toString(), mapFile, false, reader);
       

    boolean remove = runningCompactions.add(this);
   
    clearStats();

    try {
      FileOperations fileFactory = FileOperations.getInstance();
      mfw = fileFactory.openWriter(outputFile, fs, conf, acuTableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(acuTableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          setLocalityGroup(entry.getKey());
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      setLocalityGroup("");
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, acuTableConf);
        openReader.close();
      } catch (IOException ex) {
        log.error("Verification of successful compaction fails!!! " + extent + " " + outputFile, ex);
        throw ex;
      }
     
      log.debug(String.format("Compaction %s %,d read | %,d written | %,6d entries/sec | %6.3f secs", extent, majCStats.getEntriesRead(),
          majCStats.getEntriesWritten(), (int) (majCStats.getEntriesRead() / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0));
     
      majCStats.setFileSize(fileFactory.getFileSize(outputFile, fs, conf, acuTableConf));
      return majCStats;
    } catch (IOException e) {
      log.error(e, e);
      throw e;
    } catch (RuntimeException e) {
      log.error(e, e);
      throw e;
    }


    return common;
  }

  private Map<FileRef,Pair<Key,Key>> getFirstAndLastKeys(SortedMap<FileRef,DataFileValue> allFiles) throws IOException {
    Map<FileRef,Pair<Key,Key>> result = new HashMap<FileRef,Pair<Key,Key>>();
    FileOperations fileFactory = FileOperations.getInstance();
    for (Entry<FileRef,DataFileValue> entry : allFiles.entrySet()) {
      FileRef file = entry.getKey();
      FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
      FileSKVIterator openReader = fileFactory.openReader(file.path().toString(), true, ns, ns.getConf(), this.getTableConfiguration());
      try {
        Key first = openReader.getFirstKey();
        Key last = openReader.getLastKey();
        result.put(file, new Pair<Key,Key>(first, last));
      } finally {
        openReader.close();
      }
    }
    return result;
  }
