Package org.apache.hadoop.metrics

Examples of org.apache.hadoop.metrics.MetricsRecord


        poolToJobCounters.put(pool, poolCounters);
      }
      accumulateCounters(poolCounters, counters);

      if (!poolToMetricsRecord.containsKey(pool)) {
        MetricsRecord poolRecord = context.createRecord("pool-" + pool);
        poolToMetricsRecord.put(pool, poolRecord);
      }
    }
  }
View Full Code Here


    // The gets + puts below are OK because only one thread is doing it.
    for (PoolGroupSchedulable poolGroup : poolGroupManager.getPoolGroups()) {
      int poolGroupSessions = 0;
      for (PoolSchedulable pool : poolGroup.getPools()) {
        MetricsRecord poolRecord =
            poolInfoToMetricsRecord.get(pool.getPoolInfo());
        if (poolRecord == null) {
          poolRecord = metrics.getContext().createRecord(
              "pool-" + pool.getName());
          poolInfoToMetricsRecord.put(pool.getPoolInfo(), poolRecord);
        }

        PoolInfoMetrics poolMetrics = new PoolInfoMetrics(pool.getPoolInfo(),
            type, poolRecord);
        poolMetrics.setCounter(
            MetricName.GRANTED, pool.getGranted());
        poolMetrics.setCounter(
            MetricName.REQUESTED, pool.getRequested());
        poolMetrics.setCounter(
            MetricName.SHARE, (long) pool.getShare());
        poolMetrics.setCounter(
            MetricName.MIN, pool.getMinimum());
        poolMetrics.setCounter(
            MetricName.MAX, pool.getMaximum());
        poolMetrics.setCounter(
            MetricName.WEIGHT, (long) pool.getWeight());
        poolMetrics.setCounter(
            MetricName.SESSIONS, pool.getScheduleQueue().size());
        poolMetrics.setCounter(
            MetricName.STARVING, pool.getStarvingTime(now) / 1000);
        Long averageFirstTypeMs =
            poolInfoAverageFirstWaitMs.get(pool.getPoolInfo());
        poolMetrics.setCounter(MetricName.AVE_FIRST_WAIT_MS,
            (averageFirstTypeMs == null) ?
                0 : averageFirstTypeMs.longValue());

        newPoolNameToMetrics.put(pool.getPoolInfo(), poolMetrics);
        poolGroupSessions += pool.getScheduleQueue().size();
      }

      MetricsRecord poolGroupRecord =
          poolInfoToMetricsRecord.get(poolGroup.getName());
      if (poolGroupRecord == null) {
        poolGroupRecord = metrics.getContext().createRecord(
            "poolgroup-" + poolGroup.getName());
        poolInfoToMetricsRecord.put(poolGroup.getPoolInfo(), poolGroupRecord);
View Full Code Here

  @Test
  public void testPushMetric() {
    final MetricsHistogram h = new MetricsHistogram("testHistogram", null);
    genRandomData(h);

    MetricsRecord mr = mock(MetricsRecord.class);
    h.pushMetric(mr);
   
    verify(mr).setMetric("testHistogram_num_ops", 10000L);
    verify(mr).setMetric(eq("testHistogram_min"), anyLong());
    verify(mr).setMetric(eq("testHistogram_max"), anyLong());
View Full Code Here

    metricsRecord.setMetric("adhoc_avg_first_map_wait_ms", nonConfiguredAvgFirstMapWaitTime);
    metricsRecord.setMetric("adhoc_avg_first_reduce_wait_ms", nonConfiguredAvgFirstReduceWaitTime);
  }

  private void submitPoolMetrics(PoolInfo info) {
    MetricsRecord record = poolToMetricsRecord.get(info.poolName);
    if (record == null) {
      record = MetricsUtil.createRecord(context, "pool-" + info.poolName);
      FairScheduler.LOG.info("Create metrics record for pool:" + info.poolName);
      poolToMetricsRecord.put(info.poolName, record);
    }
    record.setMetric("min_map", info.minMaps);
    record.setMetric("min_reduce", info.minReduces);
    record.setMetric("max_map", info.maxMaps);
    record.setMetric("max_reduce", info.maxReduces);
    record.setMetric("running_map", info.runningMaps);
    record.setMetric("running_reduce", info.runningReduces);
    record.setMetric("runnable_map", info.runnableMaps);
    record.setMetric("runnable_reduce", info.runnableReduces);
    record.setMetric("inited_tasks", info.initedTasks);
    record.setMetric("max_inited_tasks", info.maxInitedTasks);
    int runningJobs = info.runningJobs;
    record.setMetric("avg_first_map_wait_ms",
        (runningJobs == 0) ? 0 : info.totalFirstMapWaitTime / runningJobs);
    record.setMetric("avg_first_reduce_wait_ms",
        (runningJobs == 0) ? 0 : info.totalFirstReduceWaitTime / runningJobs);
  }
View Full Code Here

  }

 
  @Override
  protected MetricsRecord newRecord(String recordName) {
    MetricsRecord record = super.newRecord(recordName);
    if (records.isEmpty() || records.contains(recordName)) {
      // Create MBean to expose this record
      // Only if this record is to be exposed through JMX
      getOrCreateMBean(recordName);
    }
View Full Code Here

        poolToJobCounters.put(pool, poolCounters);
      }
      accumulateCounters(poolCounters, counters);

      if (!poolToMetricsRecord.containsKey(pool)) {
        MetricsRecord poolRecord = context.createRecord("pool-" + pool);
        poolToMetricsRecord.put(pool, poolRecord);
      }
    }
  }
View Full Code Here

    // The gets + puts below are OK because only one thread is doing it.
    for (PoolGroupSchedulable poolGroup : poolGroupManager.getPoolGroups()) {
      int poolGroupSessions = 0;
      for (PoolSchedulable pool : poolGroup.getPools()) {
        MetricsRecord poolRecord =
            poolInfoToMetricsRecord.get(pool.getPoolInfo());
        if (poolRecord == null) {
          poolRecord = metrics.getContext().createRecord(
              "pool-" + pool.getName());
          poolInfoToMetricsRecord.put(pool.getPoolInfo(), poolRecord);
        }

        PoolInfoMetrics poolMetrics = new PoolInfoMetrics(pool.getPoolInfo(),
            type, poolRecord);
        poolMetrics.setCounter(
            MetricName.GRANTED, pool.getGranted());
        poolMetrics.setCounter(
            MetricName.REQUESTED, pool.getRequested());
        poolMetrics.setCounter(
            MetricName.SHARE, (long) pool.getShare());
        poolMetrics.setCounter(
            MetricName.MIN, pool.getMinimum());
        poolMetrics.setCounter(
            MetricName.MAX, pool.getMaximum());
        poolMetrics.setCounter(
            MetricName.WEIGHT, (long) pool.getWeight());
        poolMetrics.setCounter(
            MetricName.SESSIONS, pool.getScheduleQueue().size());
        poolMetrics.setCounter(
            MetricName.STARVING, pool.getStarvingTime(now) / 1000);
        Long averageFirstTypeMs =
            poolInfoAverageFirstWaitMs.get(pool.getPoolInfo());
        poolMetrics.setCounter(MetricName.AVE_FIRST_WAIT_MS,
            (averageFirstTypeMs == null) ?
                0 : averageFirstTypeMs.longValue());

        newPoolNameToMetrics.put(pool.getPoolInfo(), poolMetrics);
        poolGroupSessions += pool.getScheduleQueue().size();
      }

      MetricsRecord poolGroupRecord =
          poolInfoToMetricsRecord.get(poolGroup.getName());
      if (poolGroupRecord == null) {
        poolGroupRecord = metrics.getContext().createRecord(
            "poolgroup-" + poolGroup.getName());
        poolInfoToMetricsRecord.put(poolGroup.getPoolInfo(), poolGroupRecord);
View Full Code Here

      IPCLoggerChannelMetrics m = REGISTRY.get(name);
      if (m != null) {
        m.setChannel(ch);
      } else {
        MetricsContext metricsContext = MetricsUtil.getContext("dfs");
        MetricsRecord metricsRecord = MetricsUtil.createRecord(metricsContext,
            "loggerchannel");
        metricsRecord.setTag("loggerchannel", name);
        m = new IPCLoggerChannelMetrics(ch, metricsRecord, name);
        metricsContext.registerUpdater(m);
        REGISTRY.put(name, m);
      }
      return m;
View Full Code Here

TOP

Related Classes of org.apache.hadoop.metrics.MetricsRecord

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware#gmail.com.