Class org.apache.hadoop.hdfs.MiniDFSCluster

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties
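
The snippets below are excerpts from HDFS test code. The recurring idiom: MiniDFSCluster.stopDataNode(int) shuts down the datanode at the given index and returns a DataNodeProperties handle recording its DataNode instance, Configuration, and startup arguments; handing that object back to restartDataNode() later restarts the same node with the same storage directories. A minimal sketch of the round trip, assuming a JUnit test in the org.apache.hadoop.hdfs package (where all of the excerpts below live) and the Hadoop 2.x builder API; the three-node setup and variable names are illustrative, not taken from the examples:

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3).build();
    try {
      cluster.waitActive();

      // stopDataNode(i) removes node i from the cluster's list of live
      // datanodes and returns everything needed to start it again.
      DataNodeProperties dnProps = cluster.stopDataNode(0);

      // The remaining nodes are re-indexed: the node that was at index 1
      // is now at index 0.

      // restartDataNode() restarts the stopped node, reusing its original
      // configuration and data directories.
      assertTrue(cluster.restartDataNode(dnProps));
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }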


    // Corrupt the block on datanode dnIndex.
    // The indexes change once the nodes are restarted,
    // but the data directories do not.
    assertTrue(MiniDFSCluster.corruptReplica(dnIndex, block));

    DataNodeProperties dnProps = cluster.stopDataNode(0);

    // Each datanode has multiple data dirs; check each one.
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
      File storageDir = MiniDFSCluster.getStorageDir(dnIndex, dirIndex);


      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // Remove the block scanner's verification log so the blocks are
      // rescanned after the datanode restarts.
      File scanLog = new File(MiniDFSCluster.getFinalizedDir(
          cluster.getInstanceStorageDir(0, 0),
          cluster.getNamesystem().getBlockPoolId()).getParent()
          + "/../dncp_block_verification.log.prev");
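The block scanner keeps its per-block verification times in dncp_block_verification.log.curr and .prev under each block pool directory, which is what the relative path above points at. Deleting the log makes the scanner treat every block as never verified, so the corrupted replica is rescanned soon after the datanode restarts instead of waiting out the normal scan period (three weeks by default).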

          out.hflush();
        }
       
       
        // Shut down one of the nodes in the pipeline
        DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);

        // Write some more data and flush again. This data will only
        // be in the latter genstamp copy of the blocks.
        for (int i = 0; i < streams.size(); i++) {
          Path path = testPaths.get(i);
          FSDataOutputStream out = streams.get(i);

          out.writeBytes("new gs data\n");
          out.hflush();

          // Set replication so that only one node is necessary for this block,
          // and close it.
          cluster.getFileSystem().setReplication(path, (short)1);
          out.close();
        }
       
        // Upon restart, there will be two replicas, one with an old genstamp
        // and one current copy. This test wants to ensure that the old genstamp
        // copy is the one that is deleted.

        LOG.info("=========================== restarting cluster");
        DataNodeProperties otherNode = cluster.stopDataNode(0);
        cluster.restartNameNode();
       
        // Restart the datanode with the corrupt replica first.
        cluster.restartDataNode(oldGenstampNode);
        cluster.waitActive();
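The restart order is the point of this test: bringing oldGenstampNode back first means the restarted namenode hears about the replica with the outdated generation stamp before the current one, and the code past this excerpt verifies that the stale copy, not the up-to-date one, is the replica that gets deleted.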

      // Let the datanode settle before checking its state.
      Thread.sleep(10000);
      DataNode dn = cluster.getDataNodes().get(0);
      assertTrue("Datanode should be running", dn.isDatanodeUp());
      assertEquals("BPOfferService should be running", 1,
          dn.getAllBpOs().length);
      DataNodeProperties dnProp = cluster.stopDataNode(0);

      cluster.getNameNode(0).stop();
      cluster.getNameNode(1).stop();
      Configuration nn1 = cluster.getConfiguration(0);
      Configuration nn2 = cluster.getConfiguration(1);


     
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();
     
      // Stop the primary DN.
      DataNodeProperties badDN = cluster.stopDataNode(0);
     
      // Truncate the block on the primary DN
      corruptDataNode(0, corrupt);

      // Start the DN back up

     
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();
     
      // Stop both DNs.
      DataNodeProperties badDN = cluster.stopDataNode(0);
      DataNodeProperties badDN2 = cluster.stopDataNode(0); // what was 1 is now 0
      assertNotNull(badDN);
      assertNotNull(badDN2);
     
      // Truncate one of them as if its journal got corrupted
      corruptDataNode(0, CorruptionType.TRUNCATE_BLOCK_HALF);
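Note the repeated stopDataNode(0): MiniDFSCluster compacts its list of live datanodes after each stop, so once the first node is gone the node that was at index 1 sits at index 0, and the second call stops a different node, exactly as the inline comment says.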

    /*-------------------------------------------------------------------------
     * Even if told to use the configuration properties for dfs.datanode,
     * MiniDFSCluster.startDataNodes() should use localhost as the default if
     * the dfs.datanode properties are not set.
     *------------------------------------------------------------------------*/
    for (int i = 0; i < dns.size(); i++) {
      DataNodeProperties dnp = cluster.stopDataNode(i);
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }

    conf.unset(DFS_DATANODE_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_HTTP_ADDRESS_KEY);
    conf.unset(DFS_DATANODE_IPC_ADDRESS_KEY);

    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                           null, null, null, false, true);

    dns = cluster.getDataNodes();
    dn = dns.get(0);

    selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    // assert that default self socket address is 127.0.0.1
    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));

    /*-------------------------------------------------------------------------
     * Shut down the datanodes, reconfigure, and bring them back up.
     * This time, modify the dfs.datanode properties and make sure that they
     * are used to configure sockets by MiniDFSCluster.startDataNodes().
     *------------------------------------------------------------------------*/
    for (int i = 0; i < dns.size(); i++) {
      DataNodeProperties dnp = cluster.stopDataNode(i);
      assertNotNull("Should have been able to stop simulated datanode", dnp);
    }

    conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
    conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");

    out.write(bytes);
    out.write(bytes);
    out.hflush();

    // Stop two DNs to put them into the exclude list.
    DataNodeProperties two = cluster.stopDataNode(2);
    DataNodeProperties one = cluster.stopDataNode(1);

    // Write another block.
    // At this point, two nodes are already in the exclude list.
    out.write(bytes);
    out.write(bytes);
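Background on the exclude list being exercised here: when a datanode in the write pipeline fails, the DFSClient remembers it in a per-stream excluded-nodes list and passes that list to the namenode on subsequent block allocations, so the writes after the two stops should be served entirely by the remaining live nodes.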

    clientConf.set(DFS_CLIENT_CONTEXT, "testSlowReader");
    DistributedFileSystem fs =
        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
            clientConf);
    // Restart the DN with a shorter write timeout.
    DataNodeProperties props = cluster.stopDataNode(0);
    props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
        WRITE_TIMEOUT);
    props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
        120000);
    assertTrue(cluster.restartDataNode(props, true));
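This last snippet shows why stopDataNode() hands back the node's Configuration: the test edits props.conf in place before restarting, and restartDataNode(props, true), where the boolean is keepPort in the Hadoop 2.x API, brings the node back on its original ports so anything still holding its old address can reach it.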
