Package cloudtrace.instrument

Examples of cloudtrace.instrument.Span
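The excerpts below are drawn from Apache Accumulo source and show cloudtrace.instrument.Span in context; each is a truncated fragment of a larger method. The recurring pattern: open a span with Trace.start (or begin a whole trace with Trace.on), do the work, and stop the span in a finally block so it closes even when the work throws. A minimal sketch of that pattern (doWork is a placeholder, not part of the library):

    Span span = Trace.start("myOperation"); // open a span named for the operation
    try {
      doWork();                             // the unit of work being traced
    } finally {
      span.stop();                          // always close the span, even on failure
    }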


            count += list.size();
          }
          String msg = "sending " + String.format("%,d", count) + " mutations to " + String.format("%,d", mutationBatch.size()) + " tablets at " + location;
          Thread.currentThread().setName(msg);
         
          Span span = Trace.start("sendMutations");
          try {
            long st1 = System.currentTimeMillis();
            failures = sendMutationsToTabletServer(location, mutationBatch);
            long st2 = System.currentTimeMillis();
            if (log.isTraceEnabled())
              log.trace("sent " + String.format("%,d", count) + " mutations to " + location + " in "
                  + String.format("%.2f secs (%,.2f mutations/sec) with %,d failures", (st2 - st1) / 1000.0, count / ((st2 - st1) / 1000.0), failures.size()));
           
            long successBytes = 0;
            for (Entry<KeyExtent,List<Mutation>> entry : mutationBatch.entrySet()) {
              for (Mutation mutation : entry.getValue()) {
                successBytes += mutation.estimatedMemoryUsed();
              }
            }
           
            if (failures.size() > 0) {
              failedMutations.add(failures);
              successBytes -= failures.getMemoryUsed();
            }
           
            updateSendStats(count, st2 - st1);
            decrementMemUsed(successBytes);
           
          } finally {
            span.stop();
          }
        } catch (IOException e) {
          if (log.isTraceEnabled())
            log.trace("failed to send mutations to " + location + " : " + e.getMessage());
         
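This first excerpt, apparently from Accumulo's batch-writer send path, wraps a tablet-server RPC in a "sendMutations" span. The span brackets only the remote call and its bookkeeping; wall-clock timing and trace logging are handled separately with System.currentTimeMillis(), and span.stop() sits in the finally block so the span closes even if sendMutationsToTabletServer throws.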


 
  private void compactLocalityGroup(String lgName, Set<ByteSequence> columnFamilies, boolean inclusive, Configuration conf, FileSystem fs,
      Set<String> filesToCompact, boolean propogateDeletes, KeyExtent extent, String compactTmpName, FileSKVWriter mfw, MajorCompactionStats majCStats)
      throws IOException, MajorCompactionCanceledException {
    ArrayList<FileSKVIterator> readers = new ArrayList<FileSKVIterator>(filesToCompact.size());
    Span span = Trace.start("compact");
    try {
      long entriesCompacted = 0;
      List<SortedKeyValueIterator<Key,Value>> iters = openMapDataFiles(lgName, conf, fs, filesToCompact, extent, readers);
      CountingIterator citr = new CountingIterator(new MultiIterator(iters, extent.toDataRange()));
      DeletingIterator delIter = new DeletingIterator(citr, propogateDeletes);
     
      IteratorEnvironment iterEnv = new TabletIteratorEnvironment(IteratorScope.majc, !propogateDeletes, acuTableConf);
     
      SortedKeyValueIterator<Key,Value> itr = IteratorUtil.loadIterators(IteratorScope.majc, delIter, extent, acuTableConf, iterEnv);
     
      itr.seek(extent.toDataRange(), columnFamilies, inclusive);
     
      if (!inclusive) {
        mfw.startDefaultLocalityGroup();
      } else {
        mfw.startNewLocalityGroup(lgName, columnFamilies);
      }
     
      Span write = Trace.start("write");
      try {
        while (itr.hasTop() && isCompactionEnabled()) {
          mfw.append(itr.getTopKey(), itr.getTopValue());
          itr.next();
          entriesCompacted++;
        }
       
        if (itr.hasTop() && !isCompactionEnabled()) {
          // cancel major compaction operation
          try {
            try {
              mfw.close();
            } catch (IOException e) {
              log.error(e, e);
            }
            fs.delete(new Path(compactTmpName), true);
          } catch (Exception e) {
            log.warn("Failed to delete Canceled major compaction output file " + compactTmpName, e);
          }
          throw new MajorCompactionCanceledException();
        }
       
      } finally {
        MajorCompactionStats lgMajcStats = new MajorCompactionStats(citr.getCount(), entriesCompacted);
        majCStats.add(lgMajcStats);
        write.stop();
      }
     
    } finally {
      // close sequence files opened
      for (FileSKVIterator reader : readers) {
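The compaction excerpt above demonstrates nested spans: a "compact" span brackets the whole locality-group compaction, and a child "write" span, opened with its own Trace.start, brackets just the write loop. Each level has its own try/finally, so the child span is always stopped before its parent.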

      Set<String> smallestFiles = removeSmallest(filesToCompact, numToCompact);
     
      String fileName = fileNames.remove(0);
      String compactTmpName = fileName + "_tmp";
     
      Span span = Trace.start("compactFiles");
      try {
        // always propagate deletes, unless this is the last batch
        MajorCompactionStats mcs = compactFiles(conf, fs, smallestFiles, filesToCompact.size() == 0 ? propogateDeletes : true,
            extent, compactTmpName);
        span.data("files", "" + smallestFiles.size());
        span.data("read", "" + mcs.entriesRead);
        span.data("written", "" + mcs.entriesWritten);
        majCStats.add(mcs);
       
        long size = FileOperations.getInstance().getFileSize(compactTmpName, fs, conf,
            AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId().toString()));
       
        datafileManager.bringMajorCompactionOnline(smallestFiles, compactTmpName, fileName, new DataFileValue(size, mcs.entriesWritten));
       
        // when major compaction produces a file w/ zero entries, it will be deleted... do not want
        // to add the deleted file
        if (filesToCompact.size() > 0 && mcs.entriesWritten > 0) {
          filesToCompact.put(fileName, size);
        }
      } finally {
        span.stop();
      }
     
    }
   
    return majCStats;
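This caller shows span.data(key, value), which attaches string annotations to a span: the number of files compacted and the entries read and written are recorded on the "compactFiles" span before it is stopped, so the figures travel with the trace rather than only appearing in the log.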

  private MajorCompactionStats majorCompact(boolean idleCompaction) {
   
    MajorCompactionStats majCStats = null;
   
    // Always trace majC
    Span span = Trace.on("majorCompaction");
    try {
      synchronized (this) {
        // check that compaction is still needed - defer to splitting
        majorCompactionQueued = false;
       
        if (closing || closed || !needsMajorCompaction(idleCompaction) || majorCompactionInProgress || needsSplit()) {
          return null;
        }
       
        majorCompactionInProgress = true;
      }
     
      majCStats = _majorCompact(idleCompaction);
    } catch (MajorCompactionCanceledException mcce) {
      log.debug("Major compaction canceled, extent = " + getExtent());
      throw new RuntimeException(mcce);
    } catch (Throwable t) {
      log.error("MajC Failed, extent = " + getExtent());
      log.error("MajC Failed, message = " + (t.getMessage() == null ? t.getClass().getName() : t.getMessage()), t);
      throw new RuntimeException(t);
    } finally {
      // ensure we always reset boolean, even
      // when an exception is thrown
      synchronized (this) {
        majorCompactionInProgress = false;
        this.notifyAll();
      }
      Span curr = Trace.currentTrace();
      curr.data("extent", "" + getExtent());
      if (majCStats != null) { // majCStats stays null when the compaction was skipped or failed
        curr.data("read", "" + majCStats.entriesRead);
        curr.data("written", "" + majCStats.entriesWritten);
      }
      span.stop();
    }
   
    return majCStats;
  }
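majorCompact uses Trace.on rather than Trace.start: judging by the comment ("Always trace majC"), Trace.on begins a new top-level trace unconditionally instead of adding a child span to an existing one. Trace.currentTrace() then retrieves the active span in the finally block so the extent and entry counts can be attached before span.stop() ends the trace.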

   
    TreeSet<String> waitForScansToFinish(Set<String> pathsToWaitFor, boolean blockNewScans, long maxWaitTime) {
      long startTime = System.currentTimeMillis();
      TreeSet<String> inUse = new TreeSet<String>();
     
      Span waitForScans = Trace.start("waitForScans");
      synchronized (Tablet.this) {
        if (blockNewScans) {
          if (reservationsBlocked)
            throw new IllegalStateException();
         
          reservationsBlocked = true;
        }
       
        for (String path : pathsToWaitFor) {
          while (fileScanReferenceCounts.get(path) > 0 && System.currentTimeMillis() - startTime < maxWaitTime) {
            try {
              Tablet.this.wait(100);
            } catch (InterruptedException e) {
              log.warn(e, e);
            }
          }
        }
       
        for (String path : pathsToWaitFor) {
          if (fileScanReferenceCounts.get(path) > 0)
            inUse.add(path);
        }
       
        if (blockNewScans) {
          reservationsBlocked = false;
          Tablet.this.notifyAll();
        }
       
      }
      waitForScans.stop();
      return inUse;
    }
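waitForScansToFinish wraps a blocking wait in a "waitForScans" span, so time spent waiting for file scan references to drain is visible in the trace alongside spans that do real work.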

   
    while (true) {
      if (sampler.next())
        Trace.on("gc");
     
      Span gcSpan = Trace.start("loop");
      tStart = System.currentTimeMillis();
      try {
        // STEP 1: gather candidates
        System.gc(); // make room
        candidateMemExceeded = false;
        checkForBulkProcessingFiles = false;
       
        Span candidatesSpan = Trace.start("getCandidates");
        status.current.started = System.currentTimeMillis();
        SortedSet<String> candidates = getCandidates();
        status.current.candidates = candidates.size();
        candidatesSpan.stop();
       
        // STEP 2: confirm deletes
        // WARNING: This line is EXTREMELY IMPORTANT.
        // You MUST confirm candidates are okay to delete
        Span confirmDeletesSpan = Trace.start("confirmDeletes");
        confirmDeletes(candidates);
        status.current.inUse = status.current.candidates - candidates.size();
        confirmDeletesSpan.stop();
       
        // STEP 3: delete files
        if (safemode) {
          if (verbose)
            System.out.println("SAFEMODE: There are " + candidates.size() + " data file candidates marked for deletion.\n"
                + "          Examine the log files to identify them.\n" + "          They can be removed by executing: bin/accumulo gc --offline\n"
                + "WARNING:  Do not run the garbage collector in offline mode unless you are positive\n"
                + "          that the accumulo METADATA table is in a clean state, or that accumulo\n"
                + "          has not yet been run, in the case of an upgrade.");
          log.info("SAFEMODE: Listing all data file candidates for deletion");
          for (String s : candidates)
            log.info("SAFEMODE: " + s);
          log.info("SAFEMODE: End candidates for deletion");
        } else {
          Span deleteSpan = Trace.start("deleteFiles");
          deleteFiles(candidates);
          log.info("Number of data file candidates for deletion: " + status.current.candidates);
          log.info("Number of data file candidates still in use: " + status.current.inUse);
          log.info("Number of successfully deleted data files: " + status.current.deleted);
          log.info("Number of data files delete failures: " + status.current.errors);
          deleteSpan.stop();
         
          // check the bulk dirs we just deleted files from to see if they are empty
          deleteEmptyBulkDirs(candidates);
        }
       
        status.current.finished = System.currentTimeMillis();
        status.last = status.current;
        status.current = new GcCycleStats();
       
      } catch (Exception e) {
        log.error(e, e);
      }
      tStop = System.currentTimeMillis();
      log.info(String.format("Collect cycle took %.2f seconds", ((tStop - tStart) / 1000.0)));
     
      if (offline)
        break;
     
      if (candidateMemExceeded) {
        log.info("Gathering of candidates was interrupted due to memory shortage. Bypassing cycle delay to collect the remaining candidates.");
        continue;
      }
     
      // Clean up any unused write-ahead logs
      Span waLogs = Trace.start("walogs");
      try {
        log.info("Beginning garbage collection of write-ahead logs");
        GarbageCollectWriteAheadLogs.collect(fs, status);
      } catch (Exception e) {
        log.error(e, e);
      }
      waLogs.stop();
      gcSpan.stop();
     
      Trace.offNoFlush();
      try {
        log.debug("Sleeping for " + gcDelay + " milliseconds");
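The garbage-collector loop above drives tracing from a sampler: Trace.on("gc") switches tracing on probabilistically for a pass, child spans ("loop", "getCandidates", "confirmDeletes", "deleteFiles", "walogs") mark its phases, and Trace.offNoFlush() ends the trace without waiting for pending spans to be delivered. A condensed sketch of that sampling pattern (runCycle is a placeholder):

    if (sampler.next())          // probabilistically enable tracing for this pass
      Trace.on("gc");            // begin a new trace
    Span pass = Trace.start("loop");
    try {
      runCycle();                // the traced work
    } finally {
      pass.stop();
      Trace.offNoFlush();        // end the trace without flushing pending spans
    }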

public class GarbageCollectWriteAheadLogs {
  private static final Logger log = Logger.getLogger(GarbageCollectWriteAheadLogs.class);
 
  public static void collect(FileSystem fs, GCStatus status) {
   
    Span span = Trace.start("scanServers");
    try {
      status.currentLog.started = System.currentTimeMillis();
     
      Map<String,String> fileToServerMap = new HashMap<String,String>();
      int count = scanServers(fileToServerMap);
      long fileScanStop = System.currentTimeMillis();
      log.info(String.format("Fetched %d files from %d servers in %.2f seconds", fileToServerMap.size(), count,
          (fileScanStop - status.currentLog.started) / 1000.));
      status.currentLog.candidates = fileToServerMap.size();
      span.stop();
     
      span = Trace.start("removeMetadataEntries");
      try {
        count = removeMetadataEntries(fileToServerMap, status);
      } catch (Exception ex) {
        log.error("Unable to scan metadata table", ex);
        return;
      } finally {
        span.stop();
      }
     
      long logEntryScanStop = System.currentTimeMillis();
      log.info(String.format("%d log entries scanned in %.2f seconds", count, (logEntryScanStop - fileScanStop) / 1000.));
     
      span = Trace.start("removeFiles");
      Map<String,ArrayList<String>> serverToFileMap = mapServersToFiles(fileToServerMap);
     
      count = removeFiles(fs, serverToFileMap, status);
     
      long removeStop = System.currentTimeMillis();
      log.info(String.format("%d total logs removed from %d servers in %.2f seconds", count, serverToFileMap.size(), (removeStop - logEntryScanStop) / 1000.));
      status.currentLog.finished = removeStop;
      status.lastLog = status.currentLog;
      status.currentLog = new GcCycleStats();
      span.stop();
     
    } catch (Exception e) {
      log.error("exception occured while garbage collecting write ahead logs", e);
      span.stop();
    }
  }
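GarbageCollectWriteAheadLogs.collect reuses a single Span variable for three sequential phases ("scanServers", "removeMetadataEntries", "removeFiles"), stopping each span before starting the next so the phases appear back-to-back in the trace rather than nested.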

     
      private void writeSortedEntries(Path dest, int part, final List<Pair<LogFileKey,LogFileValue>> kv) throws IOException {
        String path = dest + String.format("/part-r-%05d", part);
        log.debug("Writing partial log file to DSF " + path);
        log.debug("Sorting");
        Span span = Trace.start("Logger sort");
        span.data("logfile", dest.getName());
        Collections.sort(kv, new Comparator<Pair<LogFileKey,LogFileValue>>() {
          @Override
          public int compare(Pair<LogFileKey,LogFileValue> o1, Pair<LogFileKey,LogFileValue> o2) {
            return o1.getFirst().compareTo(o2.getFirst());
          }
        });
        span.stop();
        span = Trace.start("Logger write");
        span.data("logfile", dest.getName());
        MapFile.Writer writer = new MapFile.Writer(conf, fs, path, LogFileKey.class, LogFileValue.class);
        short replication = 1;
        fs.setReplication(new Path(path + "/" + MapFile.DATA_FILE_NAME), replication);
        fs.setReplication(new Path(path + "/" + MapFile.INDEX_FILE_NAME), replication);
        try {
          for (Pair<LogFileKey,LogFileValue> entry : kv)
            writer.append(entry.getFirst(), entry.getSecond());
        } finally {
          writer.close();
          span.stop();
        }
      }
     
      private void copyLog(final String localLog, final String fullyQualifiedFileName) throws IOException {
        Path dest = new Path(fullyQualifiedFileName + ".copy");
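The final excerpt splits log recovery into back-to-back "Logger sort" and "Logger write" spans, tagging each with the log file's name via span.data("logfile", ...) so the trace can be correlated with the file being recovered.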
