Class org.apache.hadoop.hdfs.MiniDFSCluster (package org.apache.hadoop.hdfs)

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties
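
All of the snippets below revolve around the same lifecycle: MiniDFSCluster.stopDataNode(...) shuts a datanode down and returns a DataNodeProperties handle, and cluster.restartDataNode(...) uses that handle to bring the same datanode back, optionally on the same ports. Here is a minimal self-contained sketch of that lifecycle. It assumes Hadoop 2.x-era test artifacts (the hadoop-hdfs test jar) on the classpath; the class name is ours, and it sits in package org.apache.hadoop.hdfs because DataNodeProperties has package-private visibility in many Hadoop versions.

    // Minimal sketch (class name ours); placed in org.apache.hadoop.hdfs
    // because DataNodeProperties is package-private in many Hadoop versions.
    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.conf.Configuration;

    public class DataNodeRestartSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();

          // stopDataNode() shuts the DN down and returns a handle that keeps
          // its Configuration and storage directories for a later restart.
          MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);

          // ... exercise the cluster with one datanode down ...

          // keepPort=true restarts the DN on the same addresses, so the
          // NameNode recognizes it as the same node coming back.
          cluster.restartDataNode(dnProps, true);
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }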


      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0);
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove block scanner log to trigger block scanning
      File scanLog = new File(System.getProperty("test.build.data"),
          "dfs/data/data1/current/dncp_block_verification.log.curr");
      // wait up to one minute for the deletion to succeed
      for (int i = 0; !scanLog.delete(); i++) {
        assertTrue("Could not delete log file in one minute", i < 60);
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ignored) {}
      }
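
The snippet stops after the scan log is deleted, but the usual continuation is to restart the stopped datanode and wait for replication to recover. A hedged helper sketch of that step (the class and method names are ours; waitReplication's checked exceptions vary across Hadoop versions, and this matches the 2.x signature):

    package org.apache.hadoop.hdfs;

    import java.io.IOException;
    import java.util.concurrent.TimeoutException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class ReplicaRecoveryHelper {
      /** Bring a stopped datanode back and wait until replication recovers. */
      static void restartAndWait(MiniDFSCluster cluster,
          MiniDFSCluster.DataNodeProperties dnProps, FileSystem fs, Path file,
          short repl) throws IOException, InterruptedException, TimeoutException {
        cluster.restartDataNode(dnProps); // reuses the saved conf and storage dirs
        cluster.waitActive();             // wait for the DN to re-register
        DFSTestUtil.waitReplication(fs, file, repl);
      }
    }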


    /*-------------------------------------------------------------------------
     * Even if told to use the configuration properties for dfs.datanode,
     * MiniDFSCluster.startDataNodes() should use localhost as the default if
     * the dfs.datanode properties are not set.
     *------------------------------------------------------------------------*/
    for (int i = 0; i < dns.size(); i++) {
      DataNodeProperties dnp = cluster.stopDataNode(i);
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }

    conf.unset(DFS_DATANODE_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);

    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                           null, null, null, false, true);

    dns = cluster.getDataNodes();
    dn = dns.get(0);

    selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    // assert that default self socket address is 127.0.0.1
    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));

    /*-------------------------------------------------------------------------
     * Shut down the datanodes, reconfigure, and bring them back up.
     * This time, modify the dfs.datanode properties and make sure that they
     * are used to configure sockets by MiniDFSCluster.startDataNodes().
     *------------------------------------------------------------------------*/
    for (int i = 0; i < dns.size(); i++) {
      DataNodeProperties dnp = cluster.stopDataNode(i);
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }

    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
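
Not shown past this point: the test presumably restarts the datanodes with these wildcard settings, mirroring the startDataNodes call above, and asserts that the self socket address now contains /0.0.0.0: instead of /127.0.0.1:.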

  /**
   * If a client reads too slowly, the datanode should time out sending the
   * chunks and the transceiver should die, even if it has a long keepalive.
   */
  @Test(timeout=30000)
  public void testSlowReader() throws Exception {
    // Restart the DN with a shorter write timeout.
    DataNodeProperties props = cluster.stopDataNode(0);
    props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
        WRITE_TIMEOUT);
    props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
        120000);
    assertTrue(cluster.restartDataNode(props, true));
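
What makes this work is that DataNodeProperties keeps the stopped datanode's Configuration (the props.conf field used above), so settings can be changed between stop and restart. A sketch of that pattern in isolation; the class name, method name, and property value are ours, the property key is the one DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY resolves to, and the package declaration is needed because the conf field has package-level visibility in many versions:

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.conf.Configuration;

    class SlowReaderSetup {
      /** Restart datanode 0 with a shorter socket write timeout. */
      static void restartWithShortWriteTimeout(MiniDFSCluster cluster,
          int writeTimeoutMs) throws Exception {
        MiniDFSCluster.DataNodeProperties props = cluster.stopDataNode(0);
        Configuration dnConf = props.conf; // the DN's own conf, kept by the handle
        dnConf.setInt("dfs.datanode.socket.write.timeout", writeTimeoutMs);
        // keepPort=true so clients with cached connections reach the same address
        if (!cluster.restartDataNode(props, true)) {
          throw new IllegalStateException("datanode failed to restart");
        }
        cluster.waitActive();
      }
    }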

      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive();
     
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
     
      // make sure that NN detects that the datanode is down
      BlockManagerTestUtil.noticeDeadDatanode(
          cluster.getNameNode(), datanode.getXferAddr());
     
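
BlockManagerTestUtil.noticeDeadDatanode spares the test the multi-minute heartbeat-expiry wait by marking the node dead immediately. A compact sketch of the stop-and-declare-dead step as a reusable helper (the class and method names are ours):

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;

    class DeadNodeHelper {
      /** Stop a datanode and make the NameNode treat it as dead immediately. */
      static MiniDFSCluster.DataNodeProperties stopAndNotice(
          MiniDFSCluster cluster, int dnIndex) throws Exception {
        String xferAddr =
            cluster.getDataNodes().get(dnIndex).getDatanodeId().getXferAddr();
        MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(dnIndex);
        // Skip the heartbeat-expiry wait: mark the node dead right away.
        BlockManagerTestUtil.noticeDeadDatanode(cluster.getNameNode(), xferAddr);
        return dnProps;
      }
    }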

      HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
     
      // Stop the DN.
      DataNode dn = cluster.getDataNodes().get(0);
      String dnName = dn.getDatanodeId().getXferAddr();
      DataNodeProperties dnProps = cluster.stopDataNode(0);
     
      // Make sure both NNs register it as dead.
      BlockManagerTestUtil.noticeDeadDatanode(nn0, dnName);
      BlockManagerTestUtil.noticeDeadDatanode(nn1, dnName);
     
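
The HA variant above assumes a two-NameNode mini cluster. A minimal sketch of that scaffolding using MiniDFSNNTopology, as found in the Hadoop 2.x test utilities (the class name is ours):

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    class HaClusterSketch {
      /** Build a two-NN HA mini cluster; nn0 is made active, nn1 stays standby. */
      static MiniDFSCluster buildHaCluster(Configuration conf) throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(1)
            .build();
        cluster.waitActive();
        cluster.transitionToActive(0);
        NameNode nn0 = cluster.getNameNode(0);
        NameNode nn1 = cluster.getNameNode(1);
        // ... stop a DN and call BlockManagerTestUtil.noticeDeadDatanode on
        // both nn0 and nn1, as in the fragment above ...
        return cluster;
      }
    }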

     
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();
     
      // stop the primary DN
      DataNodeProperties badDN = cluster.stopDataNode(0);
     
      // Truncate the block on the primary DN
      corruptDataNode(0, corrupt);

      // Start the DN back up
      cluster.restartDataNode(badDN); // restore from the saved DataNodeProperties

     
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();
     
      // stop both DNs
      DataNodeProperties badDN = cluster.stopDataNode(0);
      DataNodeProperties badDN2 = cluster.stopDataNode(0); // what was 1 is now 0
      assertNotNull(badDN);
      assertNotNull(badDN2);
     
      // Truncate one of them as if its journal got corrupted
      corruptDataNode(0, CorruptionType.TRUNCATE_BLOCK_HALF);

      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive();
     
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        datanode.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
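
Note: the synchronized (namesystem.heartbeats) block in this fragment is an older Hadoop API; in later releases that field is gone, and tests force the same "node is dead" transition through BlockManagerTestUtil.noticeDeadDatanode, as in the earlier fragments.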


      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      Block block = DFSTestUtil.getFirstBlock(fs, fileName);
      TestDatanodeBlockScanner.corruptReplica(block.getBlockName(), 0);
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove block scanner log to trigger block scanning
      File scanLog = new File(System.getProperty("test.build.data"),
          "dfs/data/data1" + MiniDFSCluster.FINALIZED_DIR_NAME +
          "dncp_block_verification.log.curr");
      // wait up to one minute for the deletion to succeed
      for (int i = 0; !scanLog.delete(); i++) {
        assertTrue("Could not delete log file in one minute", i < 60);
        try {
          Thread.sleep(1000);
        } catch (InterruptedException ignored) {}
      }
