Package org.htrace

Examples of org.htrace.TraceScope
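
For orientation, here is a minimal sketch of the pattern the examples on this page share. It uses only the calls that appear in the excerpts below (Trace.startSpan, Trace.isTracing, TraceScope.getSpan, Span.addTimelineAnnotation, TraceScope.close) and assumes that Trace, like TraceScope, lives in the org.htrace package; the class and method names are hypothetical.

import org.htrace.Trace;
import org.htrace.TraceScope;

public class TraceScopeSketch {

  // Hypothetical traced operation illustrating the common shape of the examples.
  public void tracedOperation() {
    // Start a span for this unit of work; the returned scope must be closed.
    TraceScope scope = Trace.startSpan("TraceScopeSketch.tracedOperation");
    try {
      if (Trace.isTracing()) {
        // Annotate the current span only when tracing is actually enabled,
        // since a span may not be attached otherwise.
        scope.getSpan().addTimelineAnnotation("doing work");
      }
      // ... the actual work goes here ...
    } finally {
      // Close the scope in a finally block so the span is reported even if
      // the work above throws.
      scope.close();
    }
  }
}

Each excerpt below is a variant of this start-span / annotate / close-in-finally shape, taken from code that reads HFile blocks, reclaims memstore memory, and wraps ZooKeeper operations with retries.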


    BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset);

    boolean useLock = false;
    IdLock.Entry lockEntry = null;
    TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock");
    try {
      while (true) {
        if (useLock) {
          lockEntry = offsetLock.getLockEntry(dataBlockOffset);
        }

        // Check cache for block. If found return.
        if (cacheConf.isBlockCacheEnabled()) {
          // Try and get the block from the block cache. If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, useLock, isCompaction,
            updateCacheMetrics, expectedBlockType, expectedDataBlockEncoding);
          if (cachedBlock != null) {
            assert cachedBlock.isUnpacked() : "Packed block leak.";
            if (cachedBlock.getBlockType().isData()) {
              if (updateCacheMetrics) {
                HFile.dataBlockReadCnt.incrementAndGet();
              }
              // Validate encoding type for data blocks. We include encoding
              // type in the cache key, and we expect it to match on a cache hit.
              if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
                throw new IOException("Cached block under key " + cacheKey + " "
                  + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
                  + dataBlockEncoder.getDataBlockEncoding() + ")");
              }
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }
        if (Trace.isTracing()) {
          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
        }
        // Load block from filesystem.
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
            pread);
        validateBlockType(hfileBlock, expectedBlockType);
        HFileBlock unpacked = hfileBlock.unpack(hfileContext, fsBlockReader);
        BlockType.BlockCategory category = hfileBlock.getBlockType().getCategory();

        // Cache the block if necessary
        if (cacheBlock && cacheConf.shouldCacheBlockOnRead(category)) {
          cacheConf.getBlockCache().cacheBlock(cacheKey,
            cacheConf.shouldCacheCompressed(category) ? hfileBlock : unpacked,
            cacheConf.isInMemory(), this.cacheConf.isCacheDataInL1());
        }

        if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
          HFile.dataBlockReadCnt.incrementAndGet();
        }

        return unpacked;
      }
    } finally {
      traceScope.close();
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }


   * limit. If so, flush regions with the biggest memstores until we're down
   * to the lower limit. This method blocks callers until we're down to a safe
   * amount of memstore consumption.
   */
  public void reclaimMemStoreMemory() {
    TraceScope scope = Trace.startSpan("MemStoreFlusher.reclaimMemStoreMemory");
    if (isAboveHighWaterMark()) {
      if (Trace.isTracing()) {
        scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
      }
      long start = EnvironmentEdgeManager.currentTime();
      synchronized (this.blockSignal) {
        boolean blocked = false;
        long startTime = 0;
        boolean interrupted = false;
        try {
          while (isAboveHighWaterMark() && !server.isStopped()) {
            if (!blocked) {
              startTime = EnvironmentEdgeManager.currentTime();
              LOG.info("Blocking updates on " + server.toString() +
                ": the global memstore size " +
                StringUtils.humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreSize()) +
                " is >= than blocking " +
                StringUtils.humanReadableInt(globalMemStoreLimit) + " size");
            }
            blocked = true;
            wakeupFlushThread();
            try {
              // we should be able to wait forever, but we've seen a bug where
              // we miss a notify, so put a 5 second bound on it at least.
              blockSignal.wait(5 * 1000);
            } catch (InterruptedException ie) {
              LOG.warn("Interrupted while waiting");
              interrupted = true;
            }
            long took = EnvironmentEdgeManager.currentTime() - start;
            LOG.warn("Memstore is above high water mark and block " + took + "ms");
          }
        } finally {
          if (interrupted) {
            Thread.currentThread().interrupt();
          }
        }

        if(blocked){
          final long totalTime = EnvironmentEdgeManager.currentTime() - startTime;
          if(totalTime > 0){
            this.updatesBlockedMsHighWater.add(totalTime);
          }
          LOG.info("Unblocking updates for server " + server.toString());
        }
      }
    } else if (isAboveLowWaterMark()) {
      wakeupFlushThread();
    }
    scope.close();
  }

   * This function will not throw NoNodeException if the path does not
   * exist.
   */
  public void delete(String path, int version)
  throws InterruptedException, KeeperException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.delete");
      RetryCounter retryCounter = retryCounterFactory.create();
      boolean isRetry = false; // False for first attempt, true for all retries.
      while (true) {
        try {
          checkZk().delete(path, version);
          return;
        } catch (KeeperException e) {
          switch (e.code()) {
            case NONODE:
              if (isRetry) {
                LOG.info("Node " + path + " already deleted. Assuming a " +
                    "previous attempt succeeded.");
                return;
              }
              LOG.info("Node " + path + " already deleted, retry=" + isRetry);
              throw e;

            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "delete");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
        isRetry = true;
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * exists is an idempotent operation. Retry before throwing exception
   * @return A Stat instance
   */
  public Stat exists(String path, Watcher watcher)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        try {
          return checkZk().exists(path, watcher);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "exists");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * exists is an idempotent operation. Retry before throwing exception
   * @return A Stat instance
   */
  public Stat exists(String path, boolean watch)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        try {
          return checkZk().exists(path, watch);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "exists");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * getChildren is an idempotent operation. Retry before throwing exception
   * @return List of children znodes
   */
  public List<String> getChildren(String path, Watcher watcher)
    throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        try {
          return checkZk().getChildren(path, watcher);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "getChildren");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * getChildren is an idempotent operation. Retry before throwing exception
   * @return List of children znodes
   */
  public List<String> getChildren(String path, boolean watch)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        try {
          return checkZk().getChildren(path, watch);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "getChildren");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * getData is an idempotent operation. Retry before throwing exception
   * @return Data
   */
  public byte[] getData(String path, Watcher watcher, Stat stat)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        try {
          byte[] revData = checkZk().getData(path, watcher, stat);
          return this.removeMetaData(revData);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "getData");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * getData is an idempotent operation. Retry before throwing exception
   * @return Data
   */
  public byte[] getData(String path, boolean watch, Stat stat)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
      RetryCounter retryCounter = retryCounterFactory.create();
      while (true) {
        try {
          byte[] revData = checkZk().getData(path, watch, stat);
          return this.removeMetaData(revData);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "getData");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }

   * a BADVERSION is caused by a previous setData that actually succeeded
   * @return Stat instance
   */
  public Stat setData(String path, byte[] data, int version)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.setData");
      RetryCounter retryCounter = retryCounterFactory.create();
      byte[] newData = appendMetaData(data);
      boolean isRetry = false;
      while (true) {
        try {
          return checkZk().setData(path, newData, version);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "setData");
              break;
            case BADVERSION:
              if (isRetry) {
                // Try to verify whether the previous setData succeeded or not.
                try{
                  Stat stat = new Stat();
                  byte[] revData = checkZk().getData(path, false, stat);
                  if(Bytes.compareTo(revData, newData) == 0) {
                    // The BADVERSION was caused by the previous successful setData.
                    return stat;
                  }
                } catch(KeeperException keeperException){
                  // ZooKeeper is not reliable at this moment; just rethrow the exception.
                  throw keeperException;
                }
              }
            // Fall through: rethrow other exceptions and verified BADVERSION exceptions.
            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
        isRetry = true;
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }
