Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor$DatanodeIndex
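DatanodeDescriptor is the NameNode's in-memory record for a single datanode: it tracks the node's storage statistics, the block work scheduled for it, and its decommissioning progress. The DatanodeIndex inner class named in the title appears in this branch as a small cursor used when walking a block's list of datanode locations. The snippets below are test fragments exercising this class. As orientation, here is a minimal construction sketch that mirrors the constructors used in the examples; all concrete values are hypothetical:

    DatanodeID id = new DatanodeID(
        "dn1.example.com:50010",   // machine name (host:port), hypothetical
        "DS-1234567890",           // storage ID, hypothetical
        50075,                     // info port
        50020);                    // ipc port
    DatanodeDescriptor dn = new DatanodeDescriptor(id,
        "/default-rack",           // network location
        "dn1.example.com",         // host name
        100L << 30,                // capacity (bytes)
        0L,                        // dfsUsed
        100L << 30,                // remaining
        0L,                        // namespaceUsed
        0);                        // xceiverCount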


      // Give the decommission monitor time to update the status counters.
      Thread.sleep(5000);
      ArrayList<DatanodeDescriptor> decommissioningNodes = fsn
          .getDecommissioningNodes();
      if (iteration == 0) {
        assertEquals(1, decommissioningNodes.size());
        DatanodeDescriptor decommNode = decommissioningNodes.get(0);
        // expected counts: under-replicated, decommission-only replicas,
        // under-replicated in open files (see the helper sketch below)
        checkDecommissionStatus(decommNode, 4, 0, 2);
      } else {
        assertEquals(2, decommissioningNodes.size());
        DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
        DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
        checkDecommissionStatus(decommNode1, 4, 4, 2);
        checkDecommissionStatus(decommNode2, 4, 4, 2);
      }
    }
    // Call refreshNodes on FSNamesystem with empty exclude file.
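checkDecommissionStatus() is defined elsewhere in the test. A plausible reconstruction, assuming the public decommissioningStatus counters that DatanodeDescriptor exposes for this purpose; the three expected values are under-replicated blocks, decommission-only replicas, and under-replicated blocks in files open for write:

      private void checkDecommissionStatus(DatanodeDescriptor decommNode,
          int expectedUnderRep, int expectedDecommissionOnlyReplicas,
          int expectedUnderRepInOpenFiles) {
        assertEquals(expectedUnderRep,
            decommNode.decommissioningStatus.getUnderReplicatedBlocks());
        assertEquals(expectedDecommissionOnlyReplicas,
            decommNode.decommissioningStatus.getDecommissionOnlyReplicas());
        assertEquals(expectedUnderRepInOpenFiles,
            decommNode.decommissioningStatus.getUnderReplicatedInOpenFiles());
      }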


      List<Long> block_ids = new LinkedList<Long>();
      for (int i = 0; i < NUM_BLOCK_IDS; i++) {
        block_ids.add((long) i);
      }

      // Three bare descriptors are enough to act as distinct replica
      // locations for the CorruptReplicasMap.
      DatanodeDescriptor dn1 = new DatanodeDescriptor();
      DatanodeDescriptor dn2 = new DatanodeDescriptor();
      DatanodeDescriptor dn3 = new DatanodeDescriptor();

      crm.addToCorruptReplicasMap(getBlock(0), dn1);
      assertEquals("Number of corrupt blocks not returning correctly",
                   1, crm.size());
      crm.addToCorruptReplicasMap(getBlock(1), dn1);
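crm here is the CorruptReplicasMap under test, and getBlock() is a test-local factory for Block objects keyed by id. A minimal sketch of that helper (the length and generation stamp values are placeholders):

      private static Block getBlock(long blockId) {
        // length 0 and generation stamp 0 are enough for map-keying tests
        return new Block(blockId, 0, 0);
      }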

    // flush to make sure a block is allocated.
    ((DFSOutputStream) out.getWrappedStream()).hflush();

    // The same list is passed for both the live and the dead argument,
    // so it ends up holding every datanode in the cluster.
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    cluster.getNamesystem().DFSNodesStatus(dnList, dnList);
    DatanodeDescriptor dn = dnList.get(0);

    // exactly one block is scheduled for transfer to this datanode
    assertEquals(1, dn.getBlocksScheduled());

    // close the file and the counter should go to zero.
    out.close();
    assertEquals(0, dn.getBlocksScheduled());
  }
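getBlocksScheduled() counts blocks the NameNode has scheduled for transfer to this datanode but whose arrival has not yet been confirmed, which is why hflush() drives it to 1 and close() back to 0. For context, the fragment presupposes a stream opened on a single-datanode MiniDFSCluster; a hypothetical setup:

    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out = fs.create(new Path("/testBlocksScheduled.dat"));
    out.write(new byte[4096]);   // buffered client-side until flushed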

      throws Exception {
    FSNamesystem namesystem = cluster.getNameNode().namesystem;
    for (DataNode node : cluster.getDataNodes()) {
      // Get old descriptor.
      DatanodeID dnId = createDataNodeID(node);
      DatanodeDescriptor dnDs = namesystem.getDatanode(dnId);

      // Create a new id and descriptor that clone the old one's identity
      // and usage statistics.
      DatanodeID newId = new DatanodeID(node.getMachineName(),
          dnDs.getStorageID(), dnDs.getInfoPort(), dnDs.getIpcPort());
      DatanodeDescriptor newDS = new DatanodeDescriptor(newId,
          dnDs.getNetworkLocation(), dnDs.getHostName(), dnDs.getCapacity(),
          dnDs.getDfsUsed(), dnDs.getRemaining(), dnDs.getNamespaceUsed(),
          dnDs.getXceiverCount());

      // mark the replacement descriptor alive before handing it back
      newDS.isAlive = true;
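createDataNodeID() is another test-local helper. One plausible shape, assuming the datanode's public dnRegistration field from this era of the codebase:

      private DatanodeID createDataNodeID(DataNode node) {
        // the registration carries the storage ID and ports under which
        // the namesystem keyed the original descriptor
        return new DatanodeID(node.getMachineName(),
            node.dnRegistration.getStorageID(),
            node.dnRegistration.getInfoPort(),
            node.dnRegistration.getIpcPort());
      }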

  private long[] createRandomFileDispersed(Path file, int numBlocks,
               DatanodeDescriptor primaryNode, DatanodeDescriptor altNode)
      throws IOException, InterruptedException {
   
    // Remember the current override on the fake placement policy so it
    // can be restored once the file is written.
    BlockPlacementPolicyFakeData bp = BlockPlacementPolicyFakeData.lastInstance;
    DatanodeDescriptor tmp = bp.overridingDatanode;
   
    final int repl = 1;
    long[] crcs = new long[numBlocks];
    CRC32 crc = new CRC32();
    Random rand = new Random();
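The method body presumably goes on to write numBlocks blocks, steering each one's placement through the fake policy. A hypothetical continuation, assuming a BLOCK_SIZE constant and a FileSystem handle named fs:

    FSDataOutputStream out = fs.create(file, (short) repl);
    byte[] buffer = new byte[BLOCK_SIZE];
    for (int i = 0; i < numBlocks; i++) {
      // alternate block placement between the two descriptors
      bp.overridingDatanode = (i % 2 == 0) ? primaryNode : altNode;
      rand.nextBytes(buffer);
      out.write(buffer);
      out.sync();                    // force the block onto the chosen node
      crc.reset();
      crc.update(buffer);
      crcs[i] = crc.getValue();
    }
    out.close();
    bp.overridingDatanode = tmp;     // restore the previous override
    return crcs;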

      deleteList.add(block.getBlock());
    }

    // every located block should show up in the datanode's block report
    assertEquals(lbks.locatedBlockCount(),
        dn.getFSDataset().getBlockReport(namespaceID).length);
    DatanodeDescriptor dnDs = cluster.getNameNode().namesystem.getDatanode(dnId);
    // queue every block for invalidation; the datanode receives the
    // command on a later heartbeat
    dnDs.addBlocksToBeInvalidated(deleteList);

    // Make sure all blocks are deleted.
    while (dn.getFSDataset().getBlockReport(namespaceID).length != 0) {
      Thread.sleep(1000);
    }
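addBlocksToBeInvalidated() only queues work on the descriptor; the datanode learns about it via a DNA_INVALIDATE command in a later heartbeat reply, hence the polling loop. A bounded variant of that poll (30-second timeout assumed):

    long deadline = System.currentTimeMillis() + 30000;
    while (dn.getFSDataset().getBlockReport(namespaceID).length != 0) {
      if (System.currentTimeMillis() > deadline) {
        fail("blocks were not invalidated within 30 seconds");
      }
      Thread.sleep(1000);
    }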

      // get datanode info
      DatanodeInfo[] dinfo = namenode.getDatanodeReport(DatanodeReportType.ALL);

      for (int i = 0; i < dinfo.length; i++) {
        DatanodeDescriptor desc = cluster.getNameNode().getNamesystem()
            .getDatanode(dinfo[i]);
        assertEquals(numFiles, desc.numBlocks());
      }

      // save block ids
      HashSet<Long> blocks = new HashSet<Long>();
      DatanodeDescriptor dd = cluster.getNameNode().getNamesystem()
          .getDatanode(dinfo[0]);
      for (Iterator<BlockInfo> iter = dd.getBlockIterator(); iter.hasNext();) {
        blocks.add(iter.next().getBlockId());
      }

      // remove the datanodes from the namesystem; they re-register
      // when restarted below
      for (int i = 0; i < dinfo.length; i++) {
        namenode.getNamesystem().removeDatanode(dinfo[i]);
        DatanodeDescriptor desc = cluster.getNameNode().getNamesystem()
            .getDatanode(dinfo[i]);
        assertEquals(0, desc.numBlocks());
      }

      h.clearEvents();
      cluster.restartDataNodes();
      long start = System.currentTimeMillis();
      // wait (up to 30s) until all numNodes datanodes have completed
      // a block report
      while ((!h.processedEvents
          .containsKey(InjectionEvent.FSNAMESYSTEM_BLOCKREPORT_COMPLETED)
          || numNodes != h.processedEvents
              .get(InjectionEvent.FSNAMESYSTEM_BLOCKREPORT_COMPLETED))
          && System.currentTimeMillis() - start < 30000) {
        DFSTestUtil.waitNMilliSecond(100);
      }

      // blocks are back
      // we need to get the report again (re-registration)
      dinfo = namenode.getDatanodeReport(DatanodeReportType.ALL);
      for (int i = 0; i < dinfo.length; i++) {
        DatanodeDescriptor desc = cluster.getNameNode().getNamesystem()
            .getDatanode(dinfo[i]);
        // block number matches
        assertEquals(numFiles, desc.numBlocks());
       
        // blocks for this datanode
        HashSet<Long> blocksAfter = new HashSet<Long>();
        for (Iterator<BlockInfo> iter = desc.getBlockIterator(); iter.hasNext();) {
          BlockInfo bi = iter.next();
          blocksAfter.add(bi.getBlockId());
          // datanode must be listed as a location for this block
          assertTrue(0 <= bi.findDatanode(desc));
        }
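h is a test InjectionHandler that counts events fired inside the namesystem. A minimal sketch of the pieces this test relies on; the class name, map type, and override signature are assumptions:

      static class CountingHandler extends InjectionHandler {
        final Map<InjectionEvent, Integer> processedEvents =
            new ConcurrentHashMap<InjectionEvent, Integer>();

        @Override
        protected void _processEvent(InjectionEvent event, Object... args) {
          Integer seen = processedEvents.get(event);
          processedEvents.put(event, seen == null ? 1 : seen + 1);
        }

        void clearEvents() {
          processedEvents.clear();
        }
      }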

      Thread.sleep(5000);
      ArrayList<DatanodeDescriptor> decommissioningNodes = fsn
          .getDecommissioningNodesList();
      if (iteration == 0) {
        assertEquals(1, decommissioningNodes.size());
        DatanodeDescriptor decommNode = decommissioningNodes.get(0);
        checkDecommissionStatus(decommNode, 4, 0, 2);
      } else {
        assertEquals(2, decommissioningNodes.size());
        DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
        DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
        checkDecommissionStatus(decommNode1, 4, 4, 2);
        checkDecommissionStatus(decommNode2, 4, 4, 2);
      }
    }
    // Call refreshNodes on FSNamesystem with empty exclude file.

    // flush to make sure a block is allocated.
    ((DFSOutputStream) out.getWrappedStream()).sync();
   
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
    DatanodeDescriptor dn = dnList.get(0);
   
    assertEquals(1, dn.getBlocksScheduled());
  
    // close the file and the counter should go to zero.
    out.close();  
    assertEquals(0, dn.getBlocksScheduled());
  }

