Package com.google.common.base

Examples of com.google.common.base.Stopwatch
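
The snippets below come from different projects and Guava versions, so both the old constructor style (new Stopwatch()) and the current static factories appear. As orientation, here is a minimal self-contained sketch of the current API; the Thread.sleep call is just a stand-in for the work being measured.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();   // starts timing immediately
        Thread.sleep(100);                                  // stand-in for the work being measured
        stopwatch.stop();                                   // optional; elapsed() can also be read while running
        System.out.println("work took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms (" + stopwatch + ")");
    }
}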


    @VisibleForTesting
    HealthCheckResult checkHealth(ServiceEndPoint endPoint) {
        // We have to be very careful not to let any exceptions escape this method; if one does, subsequent
        // scheduled invocations of the Runnable may not happen, and we could stop running health checks
        // completely.  So we intentionally handle all possible exceptions here.
        Stopwatch sw = new Stopwatch(_ticker).start();

        try {
            return  _serviceFactory.isHealthy(endPoint)
                    ? new SuccessfulHealthCheckResult(endPoint.getId(), sw.stop().elapsedTime(TimeUnit.NANOSECONDS))
                    : new FailedHealthCheckResult(endPoint.getId(), sw.stop().elapsedTime(TimeUnit.NANOSECONDS));
        } catch (Exception e) {
            return new FailedHealthCheckResult(endPoint.getId(), sw.stop().elapsedTime(TimeUnit.NANOSECONDS), e);
        } finally {
            _healthCheckTime.update(sw.elapsedTime(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
        }
    }
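The health-check example above uses the pre-Guava-15 API: the public Stopwatch(Ticker) constructor and elapsedTime(TimeUnit), both of which were later removed. On a current Guava the same ticker-injection pattern is written with the static factory and elapsed(TimeUnit), and in a unit test guava-testlib's FakeTicker can stand in for the injected _ticker so the measured time is deterministic. A sketch under those assumptions:

    // production code, current API:
    //     Stopwatch sw = Stopwatch.createStarted(_ticker);
    //     ... sw.stop().elapsed(TimeUnit.NANOSECONDS) ...

    // unit test (imports com.google.common.testing.FakeTicker and org.junit.Assert.assertEquals):
    FakeTicker ticker = new FakeTicker();
    Stopwatch sw = Stopwatch.createStarted(ticker);
    ticker.advance(25, TimeUnit.MILLISECONDS);          // advance the fake clock, no real sleeping
    assertEquals(25, sw.elapsed(TimeUnit.MILLISECONDS));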


                // And just for kicks, throw in the domain summary too (as that can only help prune down the ranges)
                // The domains should never widen between each pass.
                tupleDomain = tupleDomain.intersect(node.getGeneratedPartitions().get().getTupleDomainInput()).intersect(node.getPartitionsDomainSummary());
            }

            Stopwatch partitionTimer = Stopwatch.createStarted();
            PartitionResult matchingPartitions = splitManager.getPartitions(node.getTable(), Optional.of(tupleDomain));
            List<Partition> partitions = matchingPartitions.getPartitions();
            TupleDomain undeterminedTupleDomain = matchingPartitions.getUndeterminedTupleDomain();
            log.debug("Partition retrieval, table %s (%d partitions): %dms", node.getTable(), partitions.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

            Expression unevaluatedDomainPredicate = DomainTranslator.toPredicate(undeterminedTupleDomain, ImmutableBiMap.copyOf(node.getAssignments()).inverse());

            // Construct the post scan predicate. Add the unevaluated TupleDomain back first since those are generally cheaper to evaluate than anything we can't extract
            Expression postScanPredicate = combineConjuncts(unevaluatedDomainPredicate, extractionRemainingExpression);

    }

    @Override
    public ConnectorPartitionResult getPartitions(ConnectorTableHandle tableHandle, TupleDomain<ConnectorColumnHandle> tupleDomain)
    {
        Stopwatch partitionTimer = Stopwatch.createStarted();

        checkType(tableHandle, RaptorTableHandle.class, "table");

        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(tableHandle);

        checkState(tableMetadata != null, "no metadata for %s found", tableHandle);

        Set<TablePartition> tablePartitions = shardManager.getPartitions(tableHandle);

        log.debug("Partition retrieval, raptor table %s (%d partitions): %dms", tableHandle, tablePartitions.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

        Multimap<String, ? extends PartitionKey> allPartitionKeys = shardManager.getAllPartitionKeys(tableHandle);
        Map<String, ConnectorColumnHandle> columnHandles = metadata.getColumnHandles(tableHandle);

        log.debug("Partition key retrieval, raptor table %s (%d keys): %dms", tableHandle, allPartitionKeys.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

        List<ConnectorPartition> partitions = ImmutableList.copyOf(transform(tablePartitions, partitionMapper(allPartitionKeys, columnHandles)));

        log.debug("Partition generation, raptor table %s (%d partitions): %dms", tableHandle, partitions.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

        return new ConnectorPartitionResult(partitions, tupleDomain);
    }
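Note that partitionTimer above is never stopped or reset between the three log.debug calls, so each message reports the cumulative time since createStarted(), not the cost of the individual stage. If per-stage numbers are wanted, one option (a sketch, not the original code) is to snapshot the previous reading and log the difference:

        Stopwatch timer = Stopwatch.createStarted();

        Set<TablePartition> tablePartitions = shardManager.getPartitions(tableHandle);
        long partitionsMs = timer.elapsed(TimeUnit.MILLISECONDS);
        log.debug("Partition retrieval: %dms", partitionsMs);

        Multimap<String, ? extends PartitionKey> allPartitionKeys = shardManager.getAllPartitionKeys(tableHandle);
        long keysMs = timer.elapsed(TimeUnit.MILLISECONDS);
        log.debug("Partition key retrieval: %dms (this stage: %dms)", keysMs, keysMs - partitionsMs);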

    }

    @Override
    public ConnectorSplitSource getPartitionSplits(ConnectorTableHandle tableHandle, List<ConnectorPartition> partitions)
    {
        Stopwatch splitTimer = Stopwatch.createStarted();

        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            return new FixedSplitSource(connectorId, ImmutableList.<ConnectorSplit>of());
        }

        Map<String, Node> nodesById = uniqueIndex(nodeManager.getActiveNodes(), getIdentifierFunction());

        List<ConnectorSplit> splits = new ArrayList<>();

        Multimap<Long, Entry<UUID, String>> partitionShardNodes = shardManager.getShardNodesByPartition(tableHandle);

        for (ConnectorPartition partition : partitions) {
            RaptorPartition raptorPartition = checkType(partition, RaptorPartition.class, "partition");

            ImmutableMultimap.Builder<UUID, String> shardNodes = ImmutableMultimap.builder();
            for (Entry<UUID, String> shardNode : partitionShardNodes.get(raptorPartition.getRaptorPartitionId())) {
                shardNodes.put(shardNode.getKey(), shardNode.getValue());
            }

            for (Map.Entry<UUID, Collection<String>> entry : shardNodes.build().asMap().entrySet()) {
                List<HostAddress> addresses = getAddressesForNodes(nodesById, entry.getValue());
                checkState(!addresses.isEmpty(), "no host for shard %s found: %s", entry.getKey(), entry.getValue());
                ConnectorSplit split = new RaptorSplit(entry.getKey(), addresses);
                splits.add(split);
            }
        }

        log.debug("Split retrieval for %d partitions (%d splits): %dms", partitions.size(), splits.size(), splitTimer.elapsed(TimeUnit.MILLISECONDS));

        // The query engine assumes that splits are returned in a somewhat random fashion. The Raptor split manager,
        // because it loads the data from a database table, will return the splits somewhat ordered by node ID,
        // so only a subset of nodes are fired up. Shuffle the splits to ensure random distribution.
        Collections.shuffle(splits);

     */
    public PerformanceLogger(long threshold)
    {
        this.from = StackTrace.firstElementBelowClass();
        this.threshold = threshold;
        this.stopwatch = new Stopwatch().start();
    }
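Only the constructor of PerformanceLogger is shown above; it records the call site, the threshold, and a started stopwatch. A hypothetical completion method for such a threshold-based logger could look like the following; the stop() method, the log field, and the millisecond interpretation of threshold are assumptions, not part of the original class:

    public void stop()
    {
        long elapsedMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS);  // assumes a Guava version with elapsed(TimeUnit)
        if (elapsedMillis >= threshold) {
            log.warn("{} took {} ms (threshold: {} ms)", from, elapsedMillis, threshold);   // hypothetical log field
        }
    }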

    }

    @Override
    public List<Partition> getPartitions(TableHandle tableHandle, Map<ColumnHandle, Object> bindings)
    {
        Stopwatch partitionTimer = new Stopwatch();
        partitionTimer.start();

        checkArgument(tableHandle instanceof NativeTableHandle, "Table must be a native table");

        TableMetadata tableMetadata = metadata.getTableMetadata(tableHandle);

        checkState(tableMetadata != null, "no metadata for %s found", tableHandle);

        Set<TablePartition> tablePartitions = shardManager.getPartitions(tableHandle);

        log.debug("Partition retrieval, native table %s (%d partitions): %dms", tableHandle, tablePartitions.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

        Multimap<String, ? extends PartitionKey> allPartitionKeys = shardManager.getAllPartitionKeys(tableHandle);
        Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(tableHandle);

        log.debug("Partition key retrieval, native table %s (%d keys): %dms", tableHandle, allPartitionKeys.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

        List<Partition> partitions = ImmutableList.copyOf(Collections2.transform(tablePartitions, new PartitionFunction(columnHandles, allPartitionKeys)));

        log.debug("Partition generation, native table %s (%d partitions): %dms", tableHandle, partitions.size(), partitionTimer.elapsed(TimeUnit.MILLISECONDS));

        return partitions;
    }

    }

    @Override
    public Iterable<Split> getPartitionSplits(TableHandle tableHandle, List<Partition> partitions)
    {
        Stopwatch splitTimer = new Stopwatch();
        splitTimer.start();

        checkNotNull(partitions, "partitions is null");
        if (partitions.isEmpty()) {
            return ImmutableList.of();
        }

        Map<String, Node> nodesById = uniqueIndex(nodeManager.getAllNodes().getActiveNodes(), Node.getIdentifierFunction());

        List<Split> splits = new ArrayList<>();

        Multimap<Long, Entry<Long, String>> partitionShardNodes = shardManager.getCommittedPartitionShardNodes(tableHandle);

        for (Partition partition : partitions) {
            checkArgument(partition instanceof NativePartition, "Partition must be a native partition");
            NativePartition nativePartition = (NativePartition) partition;

            ImmutableMultimap.Builder<Long, String> shardNodes = ImmutableMultimap.builder();
            for (Entry<Long, String> partitionShardNode : partitionShardNodes.get(nativePartition.getNativePartitionId())) {
                shardNodes.put(partitionShardNode.getKey(), partitionShardNode.getValue());
            }

            for (Map.Entry<Long, Collection<String>> entry : shardNodes.build().asMap().entrySet()) {
                List<HostAddress> addresses = getAddressesForNodes(nodesById, entry.getValue());
                checkState(addresses.size() > 0, "no host for shard %s found", entry.getKey());
                Split split = new NativeSplit(entry.getKey(), addresses);
                splits.add(split);
            }
        }

        log.debug("Split retrieval for %d partitions (%d splits): %dms", partitions.size(), splits.size(), splitTimer.elapsed(TimeUnit.MILLISECONDS));

        // The query engine assumes that splits are returned in a somewhat random fashion. The native split manager,
        // because it loads the data from a database table, will return the splits somewhat ordered by node ID,
        // so only a subset of nodes is fired up. Shuffle the splits to ensure random distribution.
        Collections.shuffle(splits);

        functions = ImmutableList.copyOf(functionFlowBuilder.functions);
    }

    public void apply(C context) {
        logger.info("Applying {} functions...", functions.size());
        Stopwatch stopwatch = new Stopwatch();
        for (Function<C, C> function : functions) {
            logger.info("Starting {} ...", function.getClass().getName());
            stopwatch.reset().start();
            context = function.apply(context);
            stopwatch.stop();
            logger.info("{} done. Tooks {} ms.", function.getClass().getName(), stopwatch.elapsed(MILLISECONDS));
        }
        logger.info("Apply done.");
    }
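A related convenience: the Stopwatch itself can be handed to the logger, because Stopwatch.toString() renders the elapsed time in a readable unit (for example "103.5 ms"). The last log statement in the loop above could equally be written as:

            logger.info("{} done. Took {}.", function.getClass().getName(), stopwatch);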

   * implement {@code loadAll}.
   */
  @Nullable
  Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader)
      throws ExecutionException {
    Stopwatch stopwatch = new Stopwatch().start();
    Map<K, V> result;
    boolean success = false;
    try {
      @SuppressWarnings("unchecked") // safe since all keys extend K
      Map<K, V> map = (Map<K, V>) loader.loadAll(keys);
      result = map;
      success = true;
    } catch (UnsupportedLoadingOperationException e) {
      success = true;
      throw e;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new ExecutionException(e);
    } catch (RuntimeException e) {
      throw new UncheckedExecutionException(e);
    } catch (Exception e) {
      throw new ExecutionException(e);
    } catch (Error e) {
      throw new ExecutionError(e);
    } finally {
      if (!success) {
        globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
      }
    }

    if (result == null) {
      globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
      throw new InvalidCacheLoadException(loader + " returned null map from loadAll");
    }

    stopwatch.stop();
    // TODO(fry): batch by segment
    boolean nullsPresent = false;
    for (Map.Entry<K, V> entry : result.entrySet()) {
      K key = entry.getKey();
      V value = entry.getValue();
      if (key == null || value == null) {
        // delay failure until non-null entries are stored
        nullsPresent = true;
      } else {
        put(key, value);
      }
    }

    if (nullsPresent) {
      globalStatsCounter.recordLoadException(stopwatch.elapsedTime(NANOSECONDS));
      throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll");
    }

    // TODO(fry): record count of loaded entries
    globalStatsCounter.recordLoadSuccess(stopwatch.elapsedTime(NANOSECONDS));
    return result;
  }
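The structure above (record a failed load in the finally block and on the explicit error paths, record success only after the result has been validated) is the usual shape of latency metrics around an operation that can fail. A minimal hedged sketch of the same idea, with doLoad() and the stats counter methods as placeholders:

  Stopwatch stopwatch = Stopwatch.createStarted();
  boolean success = false;
  try {
    doLoad();                          // placeholder for the operation being timed
    success = true;
  } finally {
    long nanos = stopwatch.elapsed(TimeUnit.NANOSECONDS);
    if (success) {
      stats.recordSuccess(nanos);      // hypothetical counter methods
    } else {
      stats.recordFailure(nanos);
    }
  }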

    //Initializing methodsDriver
    Map<String, StreamSchema> schemaMap = MetaInformationParser.getSchemaMap(new File(binDir.toURI()));
    methodsDriver = new MethodsDriver(this, schemaMap);

    //Initialize stopwatch and retry counter
    stopwatch = new Stopwatch();
    retryCounter = 0;
  }
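Here the stopwatch field is created without being started, so timing can begin later, once the first retryable operation actually runs. With the current API the equivalent factory is Stopwatch.createUnstarted(), and isRunning() guards against calling start() (or stop()) twice, which throws IllegalStateException. A minimal sketch of that field lifecycle, with illustrative names:

  private final Stopwatch retryTimer = Stopwatch.createUnstarted();

  void onFirstAttempt() {
    if (!retryTimer.isRunning()) {
      retryTimer.start();              // start() on an already-running stopwatch throws IllegalStateException
    }
  }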


