Package org.apache.hadoop.hdfs.server.datanode.metrics

Examples of org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics

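The snippets below appear to come from Hadoop's own tests and from DataNode startup code, and they all revolve around one pattern: get hold of a running DataNode's DataNodeMetrics object and read or replace one of its time-varying counters. A minimal sketch of that pattern, assuming a MiniDFSCluster named cluster is already running and using the older MetricsTimeVaryingLong API seen throughout this page:

    // Sketch only, not one of the examples below: read a counter off a live DataNode.
    DataNode dn = cluster.getDataNodes().get(0);
    DataNodeMetrics metrics = dn.getMetrics();
    long written = metrics.bytesWritten.getCurrentIntervalValue();
    assertTrue("expected some bytes to be written", written > 0);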

    assertTrue("DN3 should be up", DataNode.isDatanodeUp(dns.get(2)));

    /*
     * The metrics should confirm the volume failures.
     */
    DataNodeMetrics metrics1 = dns.get(0).getMetrics();
    DataNodeMetrics metrics2 = dns.get(1).getMetrics();
    DataNodeMetrics metrics3 = dns.get(2).getMetrics();
    assertEquals("Vol1 should report 1 failure",
        1, metrics1.volumeFailures.getCurrentIntervalValue());
    assertEquals("Vol2 should report 1 failure",
        1, metrics2.volumeFailures.getCurrentIntervalValue());
    assertEquals("Vol3 should have no failures",
View Full Code Here


      DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
          LONG_FILE_LEN, (short)1, 1L);
      List<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(1, datanodes.size());
      DataNode datanode = datanodes.get(0);
      DataNodeMetrics metrics = datanode.getMetrics();
      assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
    } finally {
      if (cluster != null) {cluster.shutdown();}
    }
  }
View Full Code Here
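The test above writes a LONG_FILE_LEN-byte file with a single replica and expects bytesWritten to match it exactly. If the metrics class also exposes a bytesRead counter (as older DataNodeMetrics versions do), the read-side check follows the same shape; a hedged sketch reusing the fs, metrics and LONG_FILE_LEN names from the snippet:

    // Sketch: the read-side counterpart of the assertion above, assuming a
    // bytesRead counter exists alongside bytesWritten.
    DFSTestUtil.readFile(fs, new Path("/tmp.txt"));
    assertEquals(LONG_FILE_LEN, metrics.bytesRead.getCurrentIntervalValue());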

    initDataXceiver(conf);
    initFsDataSet(conf, dataDirs);
    initBlockScanner(conf);
    startInfoServer(conf);
 
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());
    // TODO check what code removed here

    initIpcServer(conf);
    startPlugins(conf);
   
View Full Code Here

    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
    DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

    // Replace the real bytesWritten counter with a Mockito spy whose inc()
    // is intercepted by the DelayAnswer, so the test can hold the update back.
    DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
    MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
    DelayAnswer delayAnswer = new DelayAnswer();
    doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
    metrics.bytesWritten = spyBytesWritten;
View Full Code Here
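The snippet above swaps the DataNode's real bytesWritten counter for a Mockito spy and routes its inc() call through a DelayAnswer, letting the test stall the metrics update and observe the DataNode mid-write. A rough sketch of what such a blocking Answer might look like (Hadoop's actual DelayAnswer test helper may expose a different API):

    import java.util.concurrent.CountDownLatch;
    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    // Hypothetical blocking Answer in the spirit of the DelayAnswer used above;
    // it parks the calling thread until the test releases it, then delegates
    // to the real method on the spied object.
    class BlockingAnswer implements Answer<Object> {
      private final CountDownLatch latch = new CountDownLatch(1);

      /** Called by the test to let the intercepted inc() go through. */
      void proceed() {
        latch.countDown();
      }

      @Override
      public Object answer(InvocationOnMock invocation) throws Throwable {
        latch.await();                      // hold the call until proceed()
        return invocation.callRealMethod(); // then run the real inc()
      }
    }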

    registerMXBean();
    initDataXceiver(conf);
    startInfoServer(conf);
    initIpcServer(conf);

    myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
  }
View Full Code Here

    registerMXBean();
    initDataXceiver(conf);
    startInfoServer(conf);
    initIpcServer(conf);

    myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
    setCountingLoggers(myMetrics);
    threadLivenessReporter = new DatanodeThreadLivenessReporter(conf.getLong(
        "dfs.datanode.thread.liveness.threshold", 240 * 1000),
        myMetrics.threadActiveness);
  }
View Full Code Here
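The liveness threshold above is read from the "dfs.datanode.thread.liveness.threshold" key, in milliseconds, with a 240-second default. A test that wants the reporter to react sooner can simply lower it in the Configuration handed to the DataNode; a sketch using only the key shown above:

    // Sketch: shrink the thread-liveness threshold for a test run.
    // 30 seconds is an arbitrary example value; the key and its millisecond
    // unit are taken from the snippet above.
    Configuration conf = new Configuration();
    conf.setLong("dfs.datanode.thread.liveness.threshold", 30 * 1000L);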

    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
    DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();

  DataNodeMetrics metrics = cluster.getDataNodes().get(0).myMetrics;
  MetricsTimeVaryingLong spyBytesWritten = spy(metrics.bytesWritten);
  DelayAnswer delayAnswer = new DelayAnswer();
  doAnswer(delayAnswer).when(spyBytesWritten).inc(anyInt());
  metrics.bytesWritten = spyBytesWritten;
View Full Code Here
