Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.stopDataNode()
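The excerpts below are trimmed from real HDFS test classes, so surrounding declarations and imports are omitted. As orientation, here is a minimal self-contained sketch of the basic stop/restart round-trip; the class name, path, and sizes are illustrative and not taken from any excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

public class StopDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/stop-datanode-sketch");
      DFSTestUtil.createFile(fs, file, 1024L, (short) 3, 0L);
      DFSTestUtil.waitReplication(fs, file, (short) 3);

      // stopDataNode(0) shuts down the first DataNode and returns its
      // properties (configuration and storage state) so the same node
      // can be restarted later with its data intact.
      DataNodeProperties dnProps = cluster.stopDataNode(0);

      // ... exercise re-replication, pipeline recovery, etc. ...

      // Bring the same DataNode back and wait for the cluster to settle.
      cluster.restartDataNode(dnProps);
      cluster.waitActive();
    } finally {
      cluster.shutdown();
    }
  }
}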


        } catch (InterruptedException ignored) {
        }
      }

      // remove a datanode to force re-establishing pipeline
      cluster.stopDataNode(0);
      // write the rest of the file
      stm.write(rawData, mid, rawData.length - mid);
      stm.close();
      // check if write is successful
      FSDataInputStream in4 = fs.open(fileToWrite);
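The excerpt above ends just as verification begins. A plausible continuation (hypothetical, reusing the excerpt's variable names rawData and in4) reads the file back and compares it to what was written:

byte[] actual = new byte[rawData.length];
org.apache.hadoop.io.IOUtils.readFully(in4, actual, 0, actual.length);
in4.close();
assertArrayEquals("data should survive pipeline re-establishment",
    rawData, actual);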


      DFSTestUtil.waitReplication(fs, fileName, (short)3);
     
      // corrupt the block on datanode 0
      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
      assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
      DataNodeProperties dnProps = cluster.stopDataNode(0);
      // remove the block scanner's verification log to trigger a re-scan
      File scanLog = new File(MiniDFSCluster.getFinalizedDir(
          cluster.getInstanceStorageDir(0, 0),
          cluster.getNamesystem().getBlockPoolId()).getParent()
          + "/../dncp_block_verification.log.prev");
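Here stopDataNode(0) returns DataNodeProperties so the node can be brought back after the verification log is deleted. A hedged sketch of the typical follow-up (the delete-and-restart steps are assumed, not shown in the excerpt):

// Delete the previous-verification log while the node is down, then
// restart it; the block scanner should re-verify the replicas and
// discover the corruption introduced above.
if (scanLog.exists()) {
  assertTrue("failed to delete " + scanLog, scanLog.delete());
}
cluster.restartDataNode(dnProps);
cluster.waitActive();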

      // Make the last datanode look like it failed to heartbeat by
      // stopping it and calling removeDatanode.
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      int idx = datanodes.size() - 1;
      DataNode dataNode = datanodes.get(idx);
      DatanodeID dnId = dataNode.getDatanodeId();
      cluster.stopDataNode(idx);
      dm.removeDatanode(dnId);

      // The block should still have sufficient # replicas, across racks.
      // The last node may not have contained a replica, but if it did
      // it should have been replicated within the same rack.
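This excerpt and several below call dm.removeDatanode(dnId) without showing where dm comes from. In these rack-policy tests it is the NameNode-side DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), typically obtained like this:

DatanodeManager dm =
    cluster.getNamesystem().getBlockManager().getDatanodeManager();

// Note the ordering used in the excerpts: the DataNode process is
// stopped first, then removeDatanode(dnId) deregisters it, so the
// NameNode treats its replicas as lost immediately instead of waiting
// for the heartbeat to expire.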

      // Stop and remove the last datanode, leaving
      // only 1 rack for all the replicas
      datanodes = cluster.getDataNodes();
      idx = datanodes.size() - 1;
      dataNode = datanodes.get(idx);
      dnId = dataNode.getDatanodeId();
      cluster.stopDataNode(idx);
      dm.removeDatanode(dnId);

      // Make sure we have enough live replicas even though we are
      // short one rack and therefore need one replica
      DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);
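A hedged reading of the call above: the trailing arguments of DFSTestUtil.waitForReplication are the expected rack count, expected replica count, and expected needed-replica count, which matches the comment (one rack short, so one replica is still needed):

// Assumed parameter roles, inferred from these tests:
// waitForReplication(cluster, block,
//     expectedRacks, expectedReplicas, expectedNeededReplicas)
DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 1);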

      // Make the datanode look like it failed to heartbeat
      // by stopping it and calling removeDatanode.
      ArrayList<DataNode> datanodes = cluster.getDataNodes();
      assertEquals(3, datanodes.size());
      DataNode dataNode = datanodes.get(2);
      DatanodeID dnId = dataNode.getDatanodeId();
      cluster.stopDataNode(2);
      dm.removeDatanode(dnId);

      // The block gets re-replicated to another datanode so it has a
      // sufficient # replicas, but not across racks, so there should
      // be 1 rack, and 1 needed replica (even though there are 2 hosts

          out.hflush();
        }
       
       
        // Shutdown one of the nodes in the pipeline
        DataNodeProperties oldGenstampNode = cluster.stopDataNode(0);

        // Write some more data and flush again. This data will only
        // be in the latter genstamp copy of the blocks.
        for (int i = 0; i < streams.size(); i++) {
          Path path = testPaths.get(i);
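The pattern above (hflush, stop a pipeline member, keep writing) is what forces a pipeline recovery: once a pipeline member dies, the next write re-establishes the pipeline and the block continues under a new generation stamp. A condensed single-file sketch of the same flow (names illustrative; fs and cluster as in the excerpts):

byte[] firstHalf = new byte[512];
byte[] secondHalf = new byte[512];
FSDataOutputStream out = fs.create(new Path("/genstamp-demo"), (short) 3);
out.write(firstHalf);
out.hflush();              // pipeline established, data durable on the DNs

// Kill one pipeline member; keep its properties for a later restart.
DataNodeProperties stopped = cluster.stopDataNode(0);

out.write(secondHalf);     // client recovers the pipeline and the block
out.hflush();              // continues under a new generation stamp
out.close();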

        // Upon restart, there will be two replicas, one with an old genstamp
        // and one current copy. This test wants to ensure that the old genstamp
        // copy is the one that is deleted.

        LOG.info("=========================== restarting cluster");
        DataNodeProperties otherNode = cluster.stopDataNode(0);
        cluster.restartNameNode();
       
        // Restart the datanode with the corrupt replica first.
        cluster.restartDataNode(oldGenstampNode);
        cluster.waitActive();
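The excerpt restarts the stale-genstamp replica first. A plausible continuation (hypothetical) then restarts the other node, so the NameNode sees both copies and can invalidate the stale one, which is what the test's comment says it is checking:

// Bring back the node holding the current-genstamp replica; the
// NameNode should schedule deletion of the old-genstamp copy, not
// this fresh one.
cluster.restartDataNode(otherNode);
cluster.waitActive();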

      Thread.sleep(10000);
      DataNode dn = cluster.getDataNodes().get(0);
      assertTrue("Datanode should be running", dn.isDatanodeUp());
      assertEquals("BPOfferService should be running", 1,
          dn.getAllBpOs().length);
      DataNodeProperties dnProp = cluster.stopDataNode(0);

      cluster.getNameNode(0).stop();
      cluster.getNameNode(1).stop();
      Configuration nn1 = cluster.getConfiguration(0);
      Configuration nn2 = cluster.getConfiguration(1);

    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    FileSystem fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);

    try {
      final Path fileName = new Path("/foo1");
      DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
      DFSTestUtil.waitReplication(fs, fileName, (short) 3);
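Stopping the fourth DataNode before the file is created is deliberate: with replication 3 and only three live nodes, all replicas must land on the first three DataNodes, and the fourth is held in reserve. A hypothetical continuation restarts it later so re-replication has somewhere to go:

// Bring the reserved fourth DataNode back into the cluster.
cluster.restartDataNode(dnPropsFourth);
cluster.waitActive();
// ... assertions on replica/rack counts would go here ...
cluster.shutdown();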

