Package org.apache.hadoop.hdfs.server.blockmanagement

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor
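
DatanodeDescriptor is the NameNode's internal bookkeeping object for a registered datanode: it tracks the node's network location and liveness, along with counters such as the number of blocks scheduled for transfer to it. The fragments below come from HDFS unit tests and show how it is typically obtained and inspected.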


   
    // Fetch every registered datanode; the same list is passed for both the
    // live and dead arguments, so all nodes land in dnList.
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    final DatanodeManager dm =
        cluster.getNamesystem().getBlockManager().getDatanodeManager();
    dm.fetchDatanodes(dnList, dnList, false);
    DatanodeDescriptor dn = dnList.get(0);

    // One block write is still in flight, so one block is scheduled on
    // this node.
    assertEquals(1, dn.getBlocksScheduled());

    // Close the file and the counter should go back to zero.
    out.close();
    assertEquals(0, dn.getBlocksScheduled());
  }
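
For context, a minimal sketch of the setup the fragment above assumes: a single-datanode MiniDFSCluster named cluster and an in-flight write out. The configuration, path, and write size here are assumptions, not verbatim source.

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // An open, flushed write keeps one block "scheduled" against the
    // datanode until the stream is closed.
    FSDataOutputStream out = fs.create(new Path("/blocksScheduledCounter"));
    for (int i = 0; i < 1024; i++) {
      out.write('x');
    }
    out.hflush();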


    // (Assertion head reconstructed; the fragment begins mid-call.)
    assertTrue("Source of replication should be one of the nodes the block " +
        "was on. Was: " + pipeline[0],
        origNodes.contains(pipeline[0]));
    // pipeline[0] is the replication source, so a length of 3 means two
    // new targets.
    assertEquals("Should have two targets", 3, pipeline.length);

    boolean foundOneOnRackA = false;
    for (int i = 1; i < pipeline.length; i++) {
      DatanodeDescriptor target = pipeline[i];
      if (rackA.contains(target)) {
        foundOneOnRackA = true;
      }
      // Targets must avoid decommissioning nodes and nodes that already
      // hold the block.
      assertFalse(decomNodes.contains(target));
      assertFalse(origNodes.contains(target));
    }
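
This fragment and the two after it are variants of one decommissioning test: a block's replicas start on nodes being decommissioned, a re-replication pass is scheduled, and the resulting pipeline (source at index 0, new targets after it) must avoid both the decommissioning nodes and the nodes that already hold the block.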

    assertEquals("Should have three targets", 4, pipeline.length);
   
    boolean foundOneOnRackA = false;
    boolean foundOneOnRackB = false;
    for (int i = 1; i < pipeline.length; i++) {
      DatanodeDescriptor target = pipeline[i];
      if (rackA.contains(target)) {
        foundOneOnRackA = true;
      } else if (rackB.contains(target)) {
        foundOneOnRackB = true;
      }
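
After the loop, the two flags are presumably checked; a plausible continuation, mirroring the rack-B assertion in the next fragment (an assumption, not verbatim source):

    assertTrue("Should have at least one target on rack A. Pipeline: " +
        Joiner.on(",").join(pipeline),
        foundOneOnRackA);
    assertTrue("Should have at least one target on rack B. Pipeline: " +
        Joiner.on(",").join(pipeline),
        foundOneOnRackB);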

    assertTrue("Source of replication should be one of the nodes the block " +
        "was on. Was: " + pipeline[0],
        origNodes.contains(pipeline[0]));
    // pipeline[0] is the source, so a length of 3 means two new targets.
    assertEquals("Should have 2 targets", 3, pipeline.length);

    boolean foundOneOnRackB = false;
    for (int i = 1; i < pipeline.length; i++) {
      DatanodeDescriptor target = pipeline[i];
      if (rackB.contains(target)) {
        foundOneOnRackB = true;
      }
      assertFalse(decomNodes.contains(target));
      assertFalse(origNodes.contains(target));
    }

    assertTrue("Should have at least one target on rack B. Pipeline: " +
        Joiner.on(",").join(pipeline),
        foundOneOnRackB);

    // Mark the block as received on the target nodes in the pipeline.
    fulfillPipeline(blockInfo, pipeline);

    // The block is still under-replicated. Add a new node; this should allow
    // the third off-rack replica to be placed.
    DatanodeDescriptor rackCNode =
        new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
    addNodes(ImmutableList.of(rackCNode));
    try {
      DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
      assertEquals(2, pipeline2.length);
      assertEquals(rackCNode, pipeline2[1]);
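
Adding the rack-C node gives the placement policy an off-rack location it previously lacked, so the next scheduled replication returns a two-element pipeline: the source at index 0 and the single new target, rackCNode, at index 1.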

  private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
    // The block initially lives only on nodes in rack A.
    List<DatanodeDescriptor> origNodes = rackA;
    BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
    DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);

    assertEquals(2, pipeline.length); // source plus a single new copy
    assertTrue("Source of replication should be one of the nodes the block " +
        "was on. Was: " + pipeline[0],
        origNodes.contains(pipeline[0]));
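
Given the method's name, the fragment presumably continues by checking that the new target landed on the other rack; a plausible next assertion (an assumption, not verbatim source, with rackB taken from the surrounding test class):

    assertTrue("Destination of replication should be on the other rack. " +
        "Was: " + pipeline[1],
        rackB.contains(pipeline[1]));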

  }

  @Test
  public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
    // Six datanodes, all on the same rack.
    List<DatanodeDescriptor> nodes = ImmutableList.of(
        new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
        new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
        new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
        new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"),
        new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"),
        new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA")
      );
    addNodes(nodes);
    // The block starts out on the first three of the six nodes.
    List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);
    for (int i = 0; i < NUM_TEST_ITERS; i++) {
      doTestSingleRackClusterIsSufficientlyReplicated(i, origNodes);
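
The point of the test: when the entire cluster is one rack, the placement policy cannot satisfy its usual spread-across-two-racks constraint, so a block whose replicas all sit on one rack must still count as sufficiently replicated rather than being queued for re-replication.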

      // Start two new datanodes.
      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive();

      // Bring down the first datanode.
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());

      // Make sure the NN notices that the datanode is down.
      BlockManagerTestUtil.noticeDeadDatanode(
          cluster.getNameNode(), datanode.getXferAddr());

      // The block will be re-replicated to restore the replication factor.
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

      // Restart the first datanode, which now holds an extra copy.
      cluster.restartDataNode(dnprop);
      cluster.waitActive();

      // Check that the (transient) excess replica is detected.
      initializeTimeout(TIMEOUT);
      while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() == 0) {
        checkTimeout("excess replicas not detected");
      }

      // Find a node that does not hold an excess replica.
      final Iterator<DatanodeDescriptor> iter = bm.blocksMap
          .nodeIterator(block.getLocalBlock());
      DatanodeDescriptor nonExcessDN = null;
      while (iter.hasNext()) {
        DatanodeDescriptor dn = iter.next();
        Collection<Block> blocks = bm.excessReplicateMap.get(dn.getStorageID());
        // Compare against the local block: the map holds Block, not
        // ExtendedBlock, so contains(block) would never match.
        if (blocks == null || !blocks.contains(block.getLocalBlock())) {
          nonExcessDN = dn;
          break;
        }
      }
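
initializeTimeout and checkTimeout are test-local helpers that are not part of this fragment. A minimal sketch of the busy-wait idiom they presumably implement, assuming java.util.concurrent.TimeoutException; the field name and sleep interval are assumptions:

    private long failtime;

    private void initializeTimeout(long timeout) {
      failtime = System.currentTimeMillis() + timeout;
    }

    // Fail once the deadline passes; otherwise sleep briefly so the caller's
    // while-loop can poll the condition again.
    private void checkTimeout(String errorMsg)
        throws TimeoutException, InterruptedException {
      if (System.currentTimeMillis() > failtime) {
        throw new TimeoutException(errorMsg);
      }
      Thread.sleep(100);
    }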

      // Look into the block manager on the active node for the block
      // under construction.
     
      NameNode nn0 = cluster.getNameNode(0);
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
      DatanodeDescriptor expectedPrimary = getExpectedPrimaryNode(nn0, blk);
      LOG.info("Expecting block recovery to be triggered on DN " +
          expectedPrimary);
     
      // Find the corresponding DN daemon, and spy on its connection to the
      // active.
      DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
      DatanodeProtocolClientSideTranslatorPB nnSpy =
          DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
     
      // Delay the commitBlockSynchronization call
      DelayAnswer delayer = new DelayAnswer(LOG);
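
The fragment ends just after the DelayAnswer is created; wiring it into the spy would look roughly like the following sketch of the GenericTestUtils.DelayAnswer pattern. The exact argument matchers are assumptions and may differ across Hadoop versions:

      // Stall the primary DN's commitBlockSynchronization call to the active
      // NN until the test later calls delayer.proceed().
      Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
          Mockito.eq(blk),
          Mockito.anyLong(),     // new generation stamp
          Mockito.anyLong(),     // new length
          Mockito.anyBoolean(),  // close file
          Mockito.anyBoolean(),  // delete block
          Mockito.<DatanodeID[]>any(),
          Mockito.<String[]>any());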

    // (Assertion head reconstructed; the fragment begins mid-call.)
    assertTrue("Block should be under construction",
        storedBlock instanceof BlockInfoUnderConstruction);
    BlockInfoUnderConstruction ucBlock =
        (BlockInfoUnderConstruction)storedBlock;
    // We expect the first indexed replica to be the one in charge of the
    // synchronization / recovery protocol.
    DatanodeDescriptor expectedPrimary = ucBlock.getExpectedLocations()[0];
    return expectedPrimary;
  }

  // Signature and tail reconstructed around the fragment: poll until the
  // datanode reaches the desired liveness state or the wait time elapses.
  private void waitForDatanodeState(String nodeID, boolean alive, int waitTime)
      throws TimeoutException, InterruptedException {
    long stopTime = System.currentTimeMillis() + waitTime;
    FSNamesystem namesystem = cluster.getNamesystem();
    String state = alive ? "alive" : "dead";
    while (System.currentTimeMillis() < stopTime) {
      final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
          namesystem, nodeID);
      if (dd.isAlive == alive) {
        LOG.info("datanode " + nodeID + " is " + state);
        return;
      }
      Thread.sleep(1000);
    }
    throw new TimeoutException("Timed out waiting for datanode " + nodeID +
        " to become " + state);
  }
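
A hedged usage sketch: reg is assumed to be the datanode's DatanodeRegistration (for example from DataNodeTestUtils.getDNRegistrationForBP), with getStorageID() assumed to match the nodeID parameter above.

      // Wait until the NN sees the node as alive, kill it, then wait for the
      // NN to declare it dead.
      waitForDatanodeState(reg.getStorageID(), true, 20000);
      cluster.getDataNodes().get(0).shutdown();
      waitForDatanodeState(reg.getStorageID(), false, 20000);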
