Package org.cloudera.htrace

Examples of org.cloudera.htrace.TraceScope
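
All of the snippets below share one pattern: open a scope with Trace.startSpan(...), do the traced work, and close the scope in a finally block so the span always ends. A minimal sketch of that pattern follows; the class, method, span name, and checkpoint annotation are illustrative placeholders, not taken from any of the projects quoted below:

  import org.cloudera.htrace.Sampler;
  import org.cloudera.htrace.Trace;
  import org.cloudera.htrace.TraceScope;

  public class TracedWorkExample {
    public void doTracedWork() {
      // Sampler.ALWAYS forces this span to be sampled; omit the sampler
      // to inherit the current trace context instead.
      TraceScope traceScope = Trace.startSpan("TracedWorkExample.doTracedWork", Sampler.ALWAYS);
      try {
        // ... the work being traced goes here ...
        if (Trace.isTracing()) {
          // guard annotations so they cost nothing when tracing is off
          traceScope.getSpan().addTimelineAnnotation("reached checkpoint");
        }
      } finally {
        traceScope.close(); // always close the scope so the span is reported
      }
    }
  }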


  /**
   * Run multiple operations in a transactional manner. Retries before throwing an exception.
   */
  public List<OpResult> multi(Iterable<Op> ops)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.multi");
      RetryCounter retryCounter = retryCounterFactory.create();
      Iterable<Op> multiOps = prepareZKMulti(ops);
      while (true) {
        try {
          return checkZk().multi(multiOps);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "multi");
              break;

            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }


      }
      Throwable errorThrowable = null;
      String error = null;
      Pair<Message, CellScanner> resultPair = null;
      RpcServer.CurCall.set(call);
      TraceScope traceScope = null;
      try {
        if (!this.rpcServer.isStarted()) {
          throw new ServerNotRunningYetException("Server is not running yet");
        }
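        // If the client sent trace info with the call, open a server-side
        // span that continues the client's trace.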
        if (call.tinfo != null) {
          traceScope = Trace.startSpan(call.toTraceString(), call.tinfo);
        }
        RequestContext.set(userProvider.create(call.connection.user), RpcServer.getRemoteIp(),
          call.connection.service);
        // make the call
        resultPair = this.rpcServer.call(call.service, call.md, call.param, call.cellScanner,
          call.timestamp, this.status);
      } catch (Throwable e) {
        RpcServer.LOG.debug(Thread.currentThread().getName() + ": " + call.toShortString(), e);
        errorThrowable = e;
        error = StringUtils.stringifyException(e);
      } finally {
        if (traceScope != null) {
          traceScope.close();
        }
        // Must always clear the request context to avoid leaking
        // credentials between requests.
        RequestContext.clear();
      }

            dataBlockEncoder.getDataBlockEncoding(),
            expectedBlockType);

    boolean useLock = false;
    IdLock.Entry lockEntry = null;
    TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock");
    try {
      while (true) {
        if (useLock) {
          lockEntry = offsetLock.getLockEntry(dataBlockOffset);
        }

        // Check cache for block. If found return.
        if (cacheConf.isBlockCacheEnabled()) {
          // Try and get the block from the block cache. If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
            cacheBlock, useLock, updateCacheMetrics);
          if (cachedBlock != null) {
            validateBlockType(cachedBlock, expectedBlockType);
            if (cachedBlock.getBlockType().isData()) {
              HFile.dataBlockReadCnt.incrementAndGet();

              // Validate encoding type for data blocks. We include encoding
              // type in the cache key, and we expect it to match on a cache hit.
              if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
                throw new IOException("Cached block under key " + cacheKey + " "
                  + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
                  + dataBlockEncoder.getDataBlockEncoding() + ")");
              }
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }
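        // Both cache checks missed; record that on the span. The isTracing()
        // guard skips the annotation when this request is not being sampled.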
        if (Trace.isTracing()) {
          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
        }
        // Load block from filesystem.
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
            pread);
        validateBlockType(hfileBlock, expectedBlockType);

        // Cache the block if necessary
        if (cacheBlock && cacheConf.shouldCacheBlockOnRead(hfileBlock.getBlockType().getCategory())) {
          cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
        }

        if (updateCacheMetrics && hfileBlock.getBlockType().isData()) {
          HFile.dataBlockReadCnt.incrementAndGet();
        }

        return hfileBlock;
      }
    } finally {
      traceScope.close();
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }

      AtomicLong sequenceId, long nonceGroup, long nonce) throws IOException {
      if (edits.isEmpty()) return this.unflushedEntries.get();
      if (this.closed) {
        throw new IOException("Cannot append; log is closed");
      }
      TraceScope traceScope = Trace.startSpan("FSHLog.append");
      try {
        long txid = 0;
        synchronized (this.updateLock) {
          // get the sequence number from the passed Long. In normal flow, it is coming from the
          // region.
          long seqNum = sequenceId.incrementAndGet();
          // The 'lastSeqWritten' map holds the sequence number of the oldest
          // write for each region (i.e. the first edit added to the particular
          // memstore). When the cache is flushed, the entry for the
          // region being flushed is removed if the sequence number of the flush
          // is greater than or equal to the value in lastSeqWritten.
          // Use the encoded name. It's shorter, guaranteed unique, and a subset
          // of the actual name.
          byte [] encodedRegionName = info.getEncodedNameAsBytes();
          if (isInMemstore) this.oldestUnflushedSeqNums.putIfAbsent(encodedRegionName, seqNum);
          HLogKey logKey = makeKey(
            encodedRegionName, tableName, seqNum, now, clusterIds, nonceGroup, nonce);

          synchronized (pendingWritesLock) {
            doWrite(info, logKey, edits, htd);
            txid = this.unflushedEntries.incrementAndGet();
          }
          this.numEntries.incrementAndGet();
          this.asyncWriter.setPendingTxid(txid);

          if (htd.isDeferredLogFlush()) {
            lastUnSyncedTxid = txid;
          }
          this.latestSequenceNums.put(encodedRegionName, seqNum);
        }
        // TODO: note that only tests currently call append w/sync.
        //       Therefore, this code here is not actually used by anything.
        // Sync if catalog region, and if not then check if that table supports
        // deferred log flushing
        if (doSync &&
            (info.isMetaRegion() ||
            !htd.isDeferredLogFlush())) {
          // sync txn to file system
          this.sync(txid);
        }
        return txid;
      } finally {
        traceScope.close();
      }
    }

        // watch our sink so we know when commits happen
        CountDownLatch latch = new CountDownLatch(1);
        waitForCommit(latch);

        // write some spans
        TraceScope trace = Trace.startSpan("Start write test", Sampler.ALWAYS);
        Span span = trace.getSpan();

        // add a child with some annotations
        Span child = span.child("child 1");
        child.addTimelineAnnotation("timeline annotation");
        TracingCompat.addAnnotation(child, "test annotation", 10);
        child.stop();

        // sleep a little bit to get some time difference
        Thread.sleep(100);

        trace.close();

        // pass the trace on
        receiver.receiveSpan(span);

        // wait for the tracer to actually do the write

        long[] serverTimeStamps = validate();
        Iterator<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> iterator = this.mutations.entrySet().iterator();
        List<Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>>> committedList = Lists.newArrayListWithCapacity(this.mutations.size());

        // add tracing for this operation
        TraceScope trace = Tracing.startNewSpan(connection, "Committing mutations to tables");
        Span span = trace.getSpan();
        int i = 0; // index into serverTimeStamps; the declaration is elided from this excerpt
        while (iterator.hasNext()) {
            Map.Entry<TableRef, Map<ImmutableBytesPtr,Map<PColumn,byte[]>>> entry = iterator.next();
            Map<ImmutableBytesPtr,Map<PColumn,byte[]>> valuesMap = entry.getValue();
            TableRef tableRef = entry.getKey();
            PTable table = tableRef.getTable();
            table.getIndexMaintainers(tempPtr);
            boolean hasIndexMaintainers = tempPtr.getLength() > 0;
            boolean isDataTable = true;
            long serverTimestamp = serverTimeStamps[i++];
            Iterator<Pair<byte[],List<Mutation>>> mutationsIterator = addRowMutations(tableRef, valuesMap, serverTimestamp, false);
            while (mutationsIterator.hasNext()) {
                Pair<byte[],List<Mutation>> pair = mutationsIterator.next();
                byte[] htableName = pair.getFirst();
                List<Mutation> mutations = pair.getSecond();
               
                // create a span per target table
                // TODO: maybe we can be smarter about the table name to string here?
                Span child = Tracing.child(span, "Writing mutation batch for table: " + Bytes.toString(htableName));

                int retryCount = 0;
                boolean shouldRetry = false;
                do {
                    ServerCache cache = null;
                    if (hasIndexMaintainers && isDataTable) {
                        byte[] attribValue = null;
                        byte[] uuidValue;
                        if (IndexMetaDataCacheClient.useIndexMetadataCache(connection, mutations, tempPtr.getLength())) {
                            IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                            cache = client.addIndexMetadataCache(mutations, tempPtr);
                            child.addTimelineAnnotation("Updated index metadata cache");
                            uuidValue = cache.getId();
                            // If we haven't retried yet, retry for this case only, as it's possible that
                            // a split will occur after we send the index metadata cache to all known
                            // region servers.
                            shouldRetry = true;
                        } else {
                            attribValue = ByteUtil.copyKeyBytesIfNecessary(tempPtr);
                            uuidValue = ServerCacheClient.generateId();
                        }
                        // Either set the UUID to be able to access the index metadata from the cache
                        // or set the index metadata directly on the Mutation
                        for (Mutation mutation : mutations) {
                            if (tenantId != null) {
                                mutation.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
                            }
                            mutation.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                            if (attribValue != null) {
                                mutation.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
                            }
                        }
                    }
                   
                    SQLException sqlE = null;
                    HTableInterface hTable = connection.getQueryServices().getTable(htableName);
                    try {
                        if (logger.isDebugEnabled()) logMutationSize(hTable, mutations);
                        long startTime = System.currentTimeMillis();
                        child.addTimelineAnnotation("Attempt " + retryCount);
                        hTable.batch(mutations);
                        child.stop();
                        shouldRetry = false;
                        if (logger.isDebugEnabled()) logger.debug("Total time for batch call of " + mutations.size() + " mutations into " + table.getName().getString() + ": " + (System.currentTimeMillis() - startTime) + " ms");
                        committedList.add(entry);
                    } catch (Exception e) {
                        SQLException inferredE = ServerUtil.parseServerExceptionOrNull(e);
                        if (inferredE != null) {
                            if (shouldRetry && retryCount == 0 && inferredE.getErrorCode() == SQLExceptionCode.INDEX_METADATA_NOT_FOUND.getErrorCode()) {
                                // Swallow this exception once, as it's possible that we split after sending the index metadata
                                // and one of the region servers doesn't have it. Clearing the region cache means the metadata
                                // is resent on the next attempt. If it fails again, we don't retry.
                                String msg = "Swallowing exception and retrying after clearing meta cache on connection. " + inferredE;
                                logger.warn(msg);
                                connection.getQueryServices().clearTableRegionCache(htableName);

                                // add a new child span as this one failed
                                child.addTimelineAnnotation(msg);
                                child.stop();
                                child = Tracing.child(span, "Failed batch, attempting retry");

                                continue;
                            }
                            e = inferredE;
                        }
                        // Throw to client with both what was committed so far and what is left to be committed.
                        // That way, client can either undo what was done or try again with what was not done.
                        sqlE = new CommitException(e, this, new MutationState(committedList, this.sizeOffset, this.maxSize, this.connection));
                    } finally {
                        try {
                            hTable.close();
                        } catch (IOException e) {
                            if (sqlE != null) {
                                sqlE.setNextException(ServerUtil.parseServerException(e));
                            } else {
                                sqlE = ServerUtil.parseServerException(e);
                            }
                        } finally {
                            try {
                                if (cache != null) {
                                    cache.close();
                                }
                            } finally {
                                if (sqlE != null) {
                                    throw sqlE;
                                }
                            }
                        }
                    }
                } while (shouldRetry && retryCount++ < 1);
                isDataTable = false;
            }
            numRows -= entry.getValue().size();
            iterator.remove(); // Remove batches as we process them
        }
        trace.close();
        assert(numRows==0);
        assert(this.mutations.isEmpty());
    }

                }
            }
        };

        // wrap the iterator so we start/end tracing as we expect
        TraceScope scope =
                Tracing.startNewSpan(context.getConnection(), "Creating basic query for "
                        + getPlanSteps(iterator));
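        // Only wrap the iterator if the sampler actually created a span;
        // otherwise return the plain iterator and skip the tracing overhead.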
        return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
    }

  public EventHandler prepare() throws Exception {
    return this;
  }

  public void run() {
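    // Open the span as a child of the parent span captured when this handler
    // was created, so the event's processing appears under the caller's trace.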
    TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
    try {
      if (getListener() != null) getListener().beforeProcess(this);
      process();
      if (getListener() != null) getListener().afterProcess(this);
    } catch(Throwable t) {
      handleException(t);
    } finally {
      chunk.close();
    }
  }

            dataBlockEncoder.getDataBlockEncoding(),
            expectedBlockType);

    boolean useLock = false;
    IdLock.Entry lockEntry = null;
    TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock");
    try {
      while (true) {
        if (useLock) {
          lockEntry = offsetLock.getLockEntry(dataBlockOffset);
        }

        // Check cache for block. If found return.
        if (cacheConf.isBlockCacheEnabled()) {
          // Try and get the block from the block cache. If the useLock variable is true then this
          // is the second time through the loop and it should not be counted as a block cache miss.
          HFileBlock cachedBlock = (HFileBlock) cacheConf.getBlockCache().getBlock(cacheKey,
              cacheBlock, useLock);
          if (cachedBlock != null) {
            validateBlockType(cachedBlock, expectedBlockType);
            if (cachedBlock.getBlockType().isData()) {
              HFile.dataBlockReadCnt.incrementAndGet();

              // Validate encoding type for data blocks. We include encoding
              // type in the cache key, and we expect it to match on a cache hit.
              if (cachedBlock.getDataBlockEncoding() != dataBlockEncoder.getDataBlockEncoding()) {
                throw new IOException("Cached block under key " + cacheKey + " "
                  + "has wrong encoding: " + cachedBlock.getDataBlockEncoding() + " (expected: "
                  + dataBlockEncoder.getDataBlockEncoding() + ")");
              }
            }
            return cachedBlock;
          }
          // Carry on, please load.
        }
        if (!useLock) {
          // check cache again with lock
          useLock = true;
          continue;
        }
        if (Trace.isTracing()) {
          traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
        }
        // Load block from filesystem.
        long startTimeNs = System.nanoTime();
        HFileBlock hfileBlock = fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, -1,
            pread);
        validateBlockType(hfileBlock, expectedBlockType);

        final long delta = System.nanoTime() - startTimeNs;
        HFile.offerReadLatency(delta, pread);

        // Cache the block if necessary
        if (cacheBlock && cacheConf.shouldCacheBlockOnRead(hfileBlock.getBlockType().getCategory())) {
          cacheConf.getBlockCache().cacheBlock(cacheKey, hfileBlock, cacheConf.isInMemory());
        }

        if (hfileBlock.getBlockType().isData()) {
          HFile.dataBlockReadCnt.incrementAndGet();
        }

        return hfileBlock;
      }
    } finally {
      traceScope.close();
      if (lockEntry != null) {
        offsetLock.releaseLockEntry(lockEntry);
      }
    }
  }

   * badversion is caused by the result of a previous setData that succeeded
   * @return Stat instance
   */
  public Stat setData(String path, byte[] data, int version)
  throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
      traceScope = Trace.startSpan("RecoverableZookeeper.setData");
      RetryCounter retryCounter = retryCounterFactory.create();
      byte[] newData = appendMetaData(data);
      boolean isRetry = false;
      while (true) {
        try {
          return zk.setData(path, newData, version);
        } catch (KeeperException e) {
          switch (e.code()) {
            case CONNECTIONLOSS:
            case SESSIONEXPIRED:
            case OPERATIONTIMEOUT:
              retryOrThrow(retryCounter, e, "setData");
              break;
            case BADVERSION:
              if (isRetry) {
                // verify whether the previous setData actually succeeded
                try {
                  Stat stat = new Stat();
                  byte[] revData = zk.getData(path, false, stat);
                  if (Bytes.compareTo(revData, newData) == 0) {
                    // the BADVERSION was caused by a previous successful setData
                    return stat;
                  }
                } catch (KeeperException keeperException) {
                  // ZooKeeper is not reliable at this moment; just rethrow
                  throw keeperException;
                }
              }
            // fall through: rethrow other exceptions and verified BADVERSIONs
            default:
              throw e;
          }
        }
        retryCounter.sleepUntilNextRetry();
        isRetry = true;
      }
    } finally {
      if (traceScope != null) traceScope.close();
    }
  }
