Class org.apache.hadoop.hdfs.MiniDFSCluster

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties

DataNodeProperties is the handle returned by MiniDFSCluster.stopDataNode(...): it bundles the stopped DataNode with its Configuration and startup arguments so the node can later be revived via MiniDFSCluster.restartDataNode(...). The snippets below come from HDFS and HBase tests that use it to simulate datanode failures and restarts.


      cluster.waitActive(false);
     
      LOG.info("Bringing down first DN");
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
      // Make sure the NN detects that the datanode is down. Zeroing the
      // node's last-heartbeat timestamp and running the heartbeat monitor
      // by hand marks it dead immediately, instead of waiting out the
      // full heartbeat-expiry interval.
      synchronized (namesystem.heartbeats) {
        datanode.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
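
The snippet above reaches into FSNamesystem internals (namesystem.heartbeats) to make the NameNode notice the loss right away. For many tests the public MiniDFSCluster surface is enough. Below is a minimal sketch of the basic stop-and-restart round trip that DataNodeProperties exists for, assuming the Hadoop 2.x Builder API (where DataNodeProperties is public) and the HDFS test jar on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

    public class DataNodeRestartSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .build();
        try {
          cluster.waitActive();
          // stopDataNode returns a handle holding the stopped DataNode, its
          // Configuration, and its startup arguments.
          DataNodeProperties dnprop = cluster.stopDataNode(0);
          // ... exercise the cluster with one datanode missing ...
          // keepPort=true restarts the node on the ports it used before, so
          // the NameNode sees the same node coming back.
          cluster.restartDataNode(dnprop, true);
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }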


     
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();
     
      // stop the primary DN
      DataNodeProperties badDN = cluster.stopDataNode(0);
     
      // Corrupt the block on the primary DN ('corrupt' is the test's corruption-type parameter)
      corruptDataNode(0, corrupt);

      // Start the DN back up
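
corruptDataNode(...) here is a helper defined in the enclosing test, not a MiniDFSCluster method. A plausible minimal version, assuming the caller has already resolved the replica's on-disk block file (real tests use version-specific MiniDFSCluster/DFSTestUtil helpers for that), is the same RandomAccessFile truncation trick used on the metafile further down this page:

    // Hypothetical stand-in for corruptDataNode(...): chop bytes off a
    // replica's block file to simulate a truncated/corrupted write.
    static void truncateBlockFile(java.io.File blockFile, long bytesToChop)
        throws java.io.IOException {
      java.io.RandomAccessFile raf =
          new java.io.RandomAccessFile(blockFile, "rw");
      try {
        raf.setLength(Math.max(0, blockFile.length() - bytesToChop));
      } finally {
        raf.close();
      }
    }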

     
      DFSOutputStream dfso = (DFSOutputStream)stm.getWrappedStream();
      dfso.abortForTests();
     
      // stop the first two DNs; indices shift down after each stop
      DataNodeProperties badDN = cluster.stopDataNode(0);
      DataNodeProperties badDN2 = cluster.stopDataNode(0); // what was 1 is now 0
      assertNotNull(badDN);
      assertNotNull(badDN2);
     
      // Truncate one of them as if its journal got corrupted
      corruptDataNode(0, CorruptionType.TRUNCATE_BLOCK_HALF);
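
Note the index arithmetic: MiniDFSCluster keeps its datanodes in a list, and stopDataNode(int) removes the entry, so after the first call the node that was at index 1 becomes index 0 (exactly what the comment above records). A short sketch of the pattern, assuming a running cluster with at least two datanodes:

    // Indices compact as nodes are removed: two stopDataNode(0) calls take
    // out the first two datanodes.
    DataNodeProperties first  = cluster.stopDataNode(0);
    DataNodeProperties second = cluster.stopDataNode(0); // formerly index 1
    // Each handle carries its own DataNode, conf, and args, so the nodes
    // can be restarted independently and in any order.
    cluster.restartDataNode(first, true);
    cluster.restartDataNode(second, true);
    cluster.waitActive();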

      // Restore a default injector
      DataNodeFaultInjector.instance = new DataNodeFaultInjector();

      // Stop a datanode to simulate a failure.
      DataNodeProperties stoppedNode = cluster.stopDataNode(0);
     
      // Fetch VolumeBlockLocations
      locs = fs.getFileBlockStorageLocations(allLocs);
      assertEquals("Expected two HdfsBlockLocation for two 1-block files", 2,
          locs.length);
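
The injector line works because DataNodeFaultInjector exposes a process-wide instance that datanode code paths call into: a test swaps in a subclass with one hook overridden, then restores the default afterwards, as the snippet does. A sketch of the pattern follows; the hook name is hypothetical, since the real hook methods vary by Hadoop version:

    DataNodeFaultInjector previous = DataNodeFaultInjector.instance;
    DataNodeFaultInjector.instance = new DataNodeFaultInjector() {
      @Override
      public void someHook() { // hypothetical hook method
        throw new RuntimeException("injected fault");
      }
    };
    try {
      // ... drive the code path that hits the hook ...
    } finally {
      // Always restore the default so later tests are unaffected.
      DataNodeFaultInjector.instance = previous;
    }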

    RandomAccessFile raf = new RandomAccessFile(metafile, "rw");
    raf.setLength(metafile.length() - 20);
    raf.close();

    // restart the DN so its replica comes back in RWR
    // (ReplicaWaitingToBeRecovered) state
    DataNodeProperties dnProp = cluster.stopDataNode(0);
    cluster.restartDataNode(dnProp, true);

    // try to recover the lease
    DistributedFileSystem newdfs = (DistributedFileSystem) FileSystem
        .newInstance(cluster.getConfiguration(0));
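
After the restart the replica is in RWR state, so the old writer's lease must be recovered before the file can be finalized. From a test this is usually forced with DistributedFileSystem.recoverLease(Path), polling until it reports success ('file' below, the Path that was being written, is an assumption):

    // recoverLease returns true once the lease is released and the last
    // block is finalized; poll because recovery is asynchronous.
    boolean recovered = false;
    for (int i = 0; i < 30 && !recovered; i++) {
      recovered = newdfs.recoverLease(file);
      if (!recovered) {
        Thread.sleep(1000);
      }
    }
    assertTrue("lease not recovered in time", recovered);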

    clientConf.set(DFS_CLIENT_CONTEXT, "testSlowReader");
    DistributedFileSystem fs =
        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
            clientConf);
    // Restart the DN with a shorter write timeout.
    DataNodeProperties props = cluster.stopDataNode(0);
    props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
        WRITE_TIMEOUT);
    props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
        120000);
    assertTrue(cluster.restartDataNode(props, true));
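
This works because the DataNodeProperties handle carries the stopped node's live Configuration: anything set on props.conf between the stop and the restart applies to the restarted datanode only. The same pattern with the key constant spelled out (the timeout value here is illustrative):

    DataNodeProperties props = cluster.stopDataNode(0);
    // DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout"
    props.conf.setInt("dfs.datanode.socket.write.timeout", 5000); // 5 s
    assertTrue(cluster.restartDataNode(props, true));
    cluster.waitActive();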

      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive();
     
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        datanode.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }

    assertTrue("Need DFSOutputStream.getPipeline() for this test",
                getPipeline != null);
    Object repl = getPipeline.invoke(stm, new Object []{} /*NO_ARGS*/);
    DatanodeInfo[] pipeline = (DatanodeInfo[]) repl;
    assertTrue(pipeline.length == fs.getDefaultReplication());
    DataNodeProperties dnprop = dfsCluster.stopDataNode(pipeline[0].getName());
    assertTrue(dnprop != null);

    // this write should succeed, but trigger a log roll
    writeData(table, 2);
    long newFilenum = log.getFilenum();
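
The getPipeline handle used above is a java.lang.reflect.Method: DFSOutputStream.getPipeline() is not public in every Hadoop version, so the test looks it up reflectively, and the assertTrue guard fails with a clear message when it is absent. The lookup typically goes along these lines (assuming java.lang.reflect.Method is imported and stm is the DFSOutputStream):

    // Find DFSOutputStream#getPipeline() reflectively; leave it null if
    // this Hadoop build does not provide it.
    Method getPipeline = null;
    for (Method m : stm.getClass().getDeclaredMethods()) {
      if (m.getName().endsWith("getPipeline")) {
        m.setAccessible(true);
        getPipeline = m;
        break;
      }
    }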
