Package com.google.common.base

Examples of com.google.common.base.Stopwatch
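The snippets below were collected from projects that time operations with Guava's Stopwatch. For orientation, here is a minimal, self-contained sketch of the current API (Stopwatch.createStarted(), added around Guava 15.0, replaces the constructor-plus-start() pattern seen in several of the older examples below); the sleep is only a stand-in for real work:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchBasics {
    public static void main(String[] args) throws InterruptedException {
        // Create and start in one call; replaces the deprecated new Stopwatch().start().
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(50); // stand-in for the work being timed

        stopwatch.stop();
        System.out.println("Work took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // The same instance can be reused for another measurement.
        stopwatch.reset().start();
    }
}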


    public AggregateNumericMetric getSummaryAggregate(int scheduleId, long beginTime, long endTime) {
        Stopwatch stopwatch = new Stopwatch().start();
        try {
            DateTime begin = new DateTime(beginTime);

            if (dateTimeService.isInRawDataRange(begin)) {
                Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleId, beginTime, endTime);
                return calculateAggregatedRaw(metrics, beginTime);
            }

            Bucket bucket = getBucket(begin);
            List<AggregateNumericMetric> metrics = dao.findAggregateMetrics(scheduleId, bucket, beginTime, endTime);
            return calculateAggregate(metrics, beginTime, bucket);
        } finally {
            stopwatch.stop();
            if (log.isDebugEnabled()) {
                log.debug("Finished calculating resource summary aggregate for [scheduleId: " + scheduleId +
                    ", beginTime: " + beginTime + ", endTime: " + endTime + "] in " +
                    stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
            }
        }
    }


    public AggregateNumericMetric getSummaryAggregate(List<Integer> scheduleIds, long beginTime, long endTime) {
        Stopwatch stopwatch = new Stopwatch().start();
        try {
            DateTime begin = new DateTime(beginTime);

            if (dateTimeService.isInRawDataRange(begin)) {
                Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleIds, beginTime, endTime);
                return calculateAggregatedRaw(metrics, beginTime);
            }
            Bucket bucket = getBucket(begin);
            List<AggregateNumericMetric> metrics = loadMetrics(scheduleIds, beginTime, endTime, bucket);

            return calculateAggregate(metrics, beginTime, bucket);
        } finally {
            stopwatch.stop();
            if (log.isDebugEnabled()) {
                log.debug("Finished calculating group summary aggregate for [scheduleIds: " + scheduleIds +
                    ", beginTime: " + beginTime + ", endTime: " + endTime + "] in " +
                    stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
            }
        }
    }

    public void addNumericData(final Set<MeasurementDataNumeric> dataSet, final RawDataInsertedCallback callback) {
        if (log.isDebugEnabled()) {
            log.debug("Inserting " + dataSet.size() + " raw metrics");
        }
        final Stopwatch stopwatch = new Stopwatch().start();
        final AtomicInteger remainingInserts = new AtomicInteger(dataSet.size());

        for (final MeasurementDataNumeric data : dataSet) {
            DateTime collectionTimeSlice = dateTimeService.getTimeSlice(new DateTime(data.getTimestamp()),
                configuration.getRawTimeSliceDuration());
            Days days = Days.daysBetween(collectionTimeSlice, dateTimeService.now());

            if (days.isGreaterThan(rawDataAgeLimit)) {
                log.info(data + " is older than the raw data age limit of " + rawDataAgeLimit.getDays() +
                    " days. It will not be stored.");
            } else {
                StorageResultSetFuture rawFuture = dao.insertRawData(data);
                StorageResultSetFuture indexFuture = dao.updateIndex(IndexBucket.RAW, collectionTimeSlice.getMillis(),
                    data.getScheduleId());
                ListenableFuture<List<ResultSet>> insertsFuture = Futures.successfulAsList(rawFuture, indexFuture);
                Futures.addCallback(insertsFuture, new FutureCallback<List<ResultSet>>() {
                    @Override
                    public void onSuccess(List<ResultSet> result) {
                        callback.onSuccess(data);
                        if (remainingInserts.decrementAndGet() == 0) {
                            stopwatch.stop();
                            if (log.isDebugEnabled()) {
                                log.debug("Finished inserting " + dataSet.size() + " raw metrics in " +
                                    stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
                            }
                            callback.onFinish();
                        }
                    }

    public Iterable<MeasurementDataNumericHighLowComposite> findDataForResource(int scheduleId, long beginTime,
        long endTime, int numberOfBuckets) {
        Stopwatch stopwatch = new Stopwatch().start();
        try {
            DateTime begin = new DateTime(beginTime);

            if (dateTimeService.isInRawDataRange(begin)) {
                Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleId, beginTime, endTime);
                return createRawComposites(metrics, beginTime, endTime, numberOfBuckets);
            }

            List<AggregateNumericMetric> metrics = null;
            if (dateTimeService.isIn1HourDataRange(begin)) {
                metrics = dao.findAggregateMetrics(scheduleId, Bucket.ONE_HOUR, beginTime,
                    endTime);
                return createComposites(metrics, beginTime, endTime, numberOfBuckets);
            } else if (dateTimeService.isIn6HourDataRange(begin)) {
                metrics = dao.findAggregateMetrics(scheduleId, Bucket.SIX_HOUR, beginTime, endTime);
                return createComposites(metrics, beginTime, endTime, numberOfBuckets);
            } else if (dateTimeService.isIn24HourDataRange(begin)) {
                metrics = dao.findAggregateMetrics(scheduleId, Bucket.TWENTY_FOUR_HOUR, beginTime, endTime);
                return createComposites(metrics, beginTime, endTime, numberOfBuckets);
            } else {
                throw new IllegalArgumentException("beginTime[" + beginTime + "] is outside the accepted range.");
            }
        } finally {
            stopwatch.stop();
            if (log.isDebugEnabled()) {
                log.debug("Finished calculating resource summary aggregate in " +
                    stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
            }
        }
    }

    @Override
    public void run() {
        Timer.Context context = metrics.totalReadTime.time();
        Stopwatch stopwatch = new Stopwatch().start();
        try {
            log.info("Running metrics queries");

            ThreadLocalRandom random = ThreadLocalRandom.current();
            int bound = startingSchedule + batchSize;

            findResourceDataForPast24Hours(random.nextInt(startingSchedule, bound));
            findResourceDataForPastWeek(random.nextInt(startingSchedule, bound));
            findResourceDataForPast2Weeks(random.nextInt(startingSchedule, bound));
            findResourceDataForPast31Days(random.nextInt(startingSchedule, bound));
            findResourceDataForPastYear(random.nextInt(startingSchedule, bound));
        } finally {
            stopwatch.stop();
            log.info("Finished running metrics queries in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
            context.stop();
        }
    }

    @Override
    public void run() {
        final Timer.Context context = metrics.batchInsertTime.time();
        final Stopwatch stopwatch = new Stopwatch().start();
        metricsServer.addNumericData(generateData(), new RawDataInsertedCallback() {
            @Override
            public void onFinish() {
                stopwatch.stop();
                log.info("Finished inserting raw data in " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
                context.stop();
            }

            @Override
            public void onSuccess(MeasurementDataNumeric result) {

        if (dbConnectionFactory == null) {
            log.info("The relational database connection factory is not set. No data migration necessary");
        } else {
            writePermits = RateLimiter.create(calculatePermits(), 30, TimeUnit.SECONDS);

            Stopwatch stopwatch = new Stopwatch().start();
            initPreparedStatements();
            Set<Integer> scheduleIds = loadScheduleIds();

            log.info("Migrating aggregate metrics for " + scheduleIds.size() + " schedule ids");

            migrate(scheduleIds, find1HourData, Bucket.ONE_HOUR);
            migrate(scheduleIds, find6HourData, Bucket.SIX_HOUR);
            migrate(scheduleIds, find24HourData, Bucket.TWENTY_FOUR_HOUR);

            stopwatch.stop();
            log.info("Finished aggregate metrics migration in " + stopwatch.elapsed(TimeUnit.SECONDS) + " seconds");

            if (failedMigrations.get() > 0) {
                throw new RuntimeException("There were " + failedMigrations.get() + " failed migrations. The " +
                    "upgrade will have to be run again to complete the migration.");
            }

    long startTime = System.currentTimeMillis();
    final int printInterval = 100000;
    Random rd = new Random(id);
    boolean get = c.getBoolean("hbase.test.do.gets", false);
    try {
      Stopwatch stopWatch = new Stopwatch();
      stopWatch.start();
      for (int i = 0; i < namespaceSpan; i++) {
        byte [] b = format(rd.nextLong());
        if (get){
          Get g = new Get(b);
          table.get(g);
        } else {
          Put p = new Put(b);
          p.add(HConstants.CATALOG_FAMILY, b, b);
          table.put(p);
        }
        if (i % printInterval == 0) {
          LOG.info("Put " + printInterval + "/" + stopWatch.elapsedMillis());
          stopWatch.reset();
          stopWatch.start();
        }
      }
      LOG.info("Finished a cycle putting " + namespaceSpan + " in " +
          (System.currentTimeMillis() - startTime) + "ms");
    } finally {
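The HBase loop above uses the older API: the public Stopwatch constructor and elapsedMillis(), both of which later Guava releases deprecated and eventually removed. A minimal sketch of the same per-interval timing pattern with the current API, where doWork(), total, and printInterval are hypothetical stand-ins for the HBase-specific parts:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class IntervalTiming {
    public static void main(String[] args) {
        final int total = 1_000_000;      // assumed workload size
        final int printInterval = 100000; // same reporting interval as the HBase example

        Stopwatch stopWatch = Stopwatch.createStarted();
        for (int i = 0; i < total; i++) {
            doWork(i);
            if (i % printInterval == 0) {
                System.out.println("Processed " + printInterval + " items in "
                    + stopWatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
                stopWatch.reset().start(); // time the next interval from zero
            }
        }
    }

    private static void doWork(int i) {
        // Hypothetical stand-in for the per-row Put/Get in the HBase example.
    }
}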

    private void markAndSweep() throws IOException, InterruptedException {
        boolean threw = true;
        try {
            Stopwatch sw = Stopwatch.createStarted();
            LOG.info("Starting Blob garbage collection");

            mark();
            int deleteCount = sweep();
            threw = false;

            LOG.info("Blob garbage collection completed in {}. Number of blobs " +
                    "deleted [{}]", sw.toString(), deleteCount);
        } finally {
            Closeables.close(fs, threw);
            state = State.NOT_RUNNING;
        }
    }

  @Test
  public void testTriesNoMoreLongerThanTotalRetryPeriod() {
    final FakeTicker ticker = new FakeTicker();
    Stopwatch stopwatch = Stopwatch.createUnstarted(ticker);
    RetryParams params = new RetryParams.Builder().initialRetryDelayMillis(0)
        .totalRetryPeriodMillis(999)
        .retryMinAttempts(5)
        .retryMaxAttempts(10)
        .build();
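The test above injects a Ticker so that elapsed time can be controlled rather than measured. A minimal sketch of that idea, assuming guava-testlib's FakeTicker is on the classpath:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;
import com.google.common.testing.FakeTicker; // from guava-testlib

public class TickerControlledStopwatch {
    public static void main(String[] args) {
        FakeTicker ticker = new FakeTicker();
        Stopwatch stopwatch = Stopwatch.createUnstarted(ticker);

        stopwatch.start();
        ticker.advance(999, TimeUnit.MILLISECONDS); // deterministically "pass" 999 ms
        stopwatch.stop();

        // Prints 999 without any real waiting, which is what lets the retry test
        // check its total retry period precisely.
        System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
}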
