Package: co.cask.cdap.common.metrics

Usage examples of co.cask.cdap.common.metrics.MetricsCollector


  @Override
  public boolean preCall(HttpRequest request, HttpResponder responder, HandlerInfo handlerInfo) {
    if (metricsCollectionService != null) {
      try {
        MetricsCollector collector = collectorCache.get(createContext(handlerInfo));
        collector.increment("request.received", 1);
      } catch (Throwable e) {
        LOG.error("Got exception while getting collector", e);
      }
    }
    return true;
View Full Code Here


  @Override
  public void postCall(HttpRequest request, HttpResponseStatus status, HandlerInfo handlerInfo) {
    if (metricsCollectionService != null) {
      try {
        MetricsCollector collector = collectorCache.get(createContext(handlerInfo));
        String name;
        int code = status.getCode();
        if (code < 100) {
          name = "unknown";
        } else if (code < 200) {
          name = "information";
        } else if (code < 300) {
          name = "successful";
        } else if (code < 400) {
          name = "redirect";
        } else if (code < 500) {
          name = "client-error";
        } else if (code < 600) {
          name = "server-error";
        } else {
          name = "unknown";
        }
        collector.increment("response." + name, 1, "status:" + code);
      } catch (Throwable e) {
        LOG.error("Got exception while getting collector", e);
      }
    }
  }
View Full Code Here

    }
  }

  private void report(Map<String, HBaseTableUtil.TableStats> datasetStat) {
    // we use "0" as runId: it is required by metrics system to provide something at this point
    MetricsCollector collector =
      metricsService.getCollector(MetricsScope.SYSTEM, Constants.Metrics.DATASET_CONTEXT, "0");
    for (Map.Entry<String, HBaseTableUtil.TableStats> statEntry : datasetStat.entrySet()) {
      String datasetName = userDsNamespace.fromNamespaced(statEntry.getKey());
      if (datasetName == null) {
        // not a user dataset
        continue;
      }
      // legacy format: dataset name is in the tag. See DatasetInstantiator for more details
      collector.increment("dataset.size.mb", statEntry.getValue().getTotalSizeMB(), datasetName);
    }
  }
View Full Code Here

    }
  }

  private void report(Map<String, LevelDBOrderedTableService.TableStats> datasetStat) {
    // we use "0" as runId: it is required by metrics system to provide something at this point
    MetricsCollector collector =
      metricsService.getCollector(MetricsScope.SYSTEM, Constants.Metrics.DATASET_CONTEXT, "0");
    for (Map.Entry<String, LevelDBOrderedTableService.TableStats> statEntry : datasetStat.entrySet()) {
      String datasetName = userDsNamespace.fromNamespaced(statEntry.getKey());
      if (datasetName == null) {
        // not a user dataset
        continue;
      }
      // legacy format: dataset name is in the tag. See DatasetInstantiator for more details
      int sizeInMb = (int) (statEntry.getValue().getDiskSizeBytes() / BYTES_IN_MB);
      collector.increment("dataset.size.mb", sizeInMb, datasetName);
    }
  }
View Full Code Here

        // Parse the resource-manager REST response; presumably this is the YARN RM
        // cluster metrics endpoint — TODO confirm against the enclosing method.
        JsonObject response = new Gson().fromJson(reader, JsonObject.class);
        if (response != null) {
          JsonObject clusterMetrics = response.getAsJsonObject("clusterMetrics");
          long totalMemory = clusterMetrics.get("totalMB").getAsLong();
          long availableMemory = clusterMetrics.get("availableMB").getAsLong();
          MetricsCollector collector = getCollector(CLUSTER_METRICS_CONTEXT);
          LOG.trace("resource manager, total memory = " + totalMemory + " available = " + availableMemory);
          // NOTE(review): the long values are narrowed to int here; clusters with
          // more than Integer.MAX_VALUE MB would overflow — metric API limitation.
          collector.increment("resources.total.memory", (int) totalMemory);
          collector.increment("resources.available.memory", (int) availableMemory);
        } else {
          // Null response: emit nothing rather than fail; operators see the warning.
          LOG.warn("unable to get resource manager metrics, cluster memory metrics will be unavailable");
        }
      } catch (IOException e) {
        LOG.error("Exception getting cluster memory from ", e);
View Full Code Here

        // Query HDFS for filesystem-wide capacity figures (bytes).
        FsStatus hdfsStatus = hdfs.getStatus();
        long storageCapacity = hdfsStatus.getCapacity();
        long storageAvailable = hdfsStatus.getRemaining();

        MetricsCollector collector = getCollector(CLUSTER_METRICS_CONTEXT);
        // TODO: metrics should support longs
        LOG.trace("total cluster storage = " + storageCapacity + " total used = " + totalUsed);
        // Bytes -> MB via /1024/1024, then narrowed to int (see TODO above);
        // very large filesystems would overflow the int metric value.
        collector.increment("resources.total.storage", (int) (storageCapacity / 1024 / 1024));
        collector.increment("resources.available.storage", (int) (storageAvailable / 1024 / 1024));
        collector.increment("resources.used.storage", (int) (totalUsed / 1024 / 1024));
        collector.increment("resources.used.files", (int) totalFiles);
        collector.increment("resources.used.directories", (int) totalDirectories);
      } catch (IOException e) {
        // Best-effort: log and carry on; HDFS metrics are simply absent this cycle.
        LOG.warn("Exception getting hdfs metrics", e);
      }
    }
View Full Code Here

                         DiscoveryServiceClient discoveryServiceClient) {
    this.program = program;
    this.runId = runId;
    this.discoveryServiceClient = discoveryServiceClient;

    // Collector for dataset-scoped metrics, resolved below only when a
    // collection service is available.
    MetricsCollector datasetMetrics;
    if (metricsCollectionService != null) {
      // NOTE: RunId metric is not supported now. Need UI refactoring to enable it.
      // Hence the placeholder "0" runId passed to both collectors.
      this.programMetrics = metricsCollectionService.getCollector(MetricsScope.SYSTEM, metricsContext, "0");
      datasetMetrics = metricsCollectionService.getCollector(MetricsScope.SYSTEM,
                                                             Constants.Metrics.DATASET_CONTEXT, "0");
View Full Code Here

  }

  protected void sendMetrics(String context, int containers, int memory, int vcores) {
    LOG.trace("Reporting resources in context {}: (containers, memory, vcores) = ({}, {}, {})",
              context, containers, memory, vcores);
    MetricsCollector collector = collectionService.getCollector(MetricsScope.SYSTEM, context, "0");
    collector.increment(METRIC_CONTAINERS, containers);
    collector.increment(METRIC_MEMORY_USAGE, memory);
    collector.increment(METRIC_VIRTUAL_CORE_USAGE, vcores);
  }
View Full Code Here

  // Builds the loader used by the collector cache: each CollectorKey maps to a
  // lazily-created anonymous MetricsCollector. (Definition continues beyond this
  // excerpt — the collector body is truncated here.)
  private CacheLoader<CollectorKey, MetricsCollector> createCollectorLoader() {
    return new CacheLoader<CollectorKey, MetricsCollector>() {
      @Override
      public MetricsCollector load(final CollectorKey collectorKey) throws Exception {
        return new MetricsCollector() {

          // Cache for minimizing creating new MetricKey object.
          private final LoadingCache<String, EmitterKey> keys =
            CacheBuilder.newBuilder()
              .expireAfterAccess(CACHE_EXPIRE_MINUTES, TimeUnit.MINUTES)
View Full Code Here

TOP

Related Classes of co.cask.cdap.common.metrics.MetricsCollector

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.