Package org.apache.hadoop.hdfs.server.blockmanagement

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor$DecommissioningStatus


    cluster.getNamesystem().readLock();
    try {
      final DatanodeManager datanodeManager =
          cluster.getNamesystem().getBlockManager().getDatanodeManager();
      for (DataNode dn : cluster.getDataNodes()) {
        DatanodeDescriptor descriptor =
            datanodeManager.getDatanode(dn.getDatanodeId());
        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
      }
    } finally {
      cluster.getNamesystem().readUnlock();
    }
  }
View Full Code Here
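The fragment above shows the locking discipline for inspecting a DatanodeDescriptor: take the namesystem read lock, look the node up through the DatanodeManager, and release the lock in a finally block. Below is a minimal sketch applying the same pattern to the DecommissioningStatus inner class this page indexes, assuming the 2.x API in which DatanodeDescriptor exposes a public decommissioningStatus field; the helper method itself is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;

    // Report re-replication progress for a node that is leaving service.
    static void printDecommissionProgress(MiniDFSCluster cluster, DataNode dn)
        throws IOException {
      cluster.getNamesystem().readLock();
      try {
        DatanodeDescriptor node = cluster.getNamesystem().getBlockManager()
            .getDatanodeManager().getDatanode(dn.getDatanodeId());
        if (node != null && node.isDecommissionInProgress()) {
          DatanodeDescriptor.DecommissioningStatus status =
              node.decommissioningStatus;
          System.out.println("under-replicated blocks: "
              + status.getUnderReplicatedBlocks()
              + ", replicas only on decommissioning nodes: "
              + status.getDecommissionOnlyReplicas()
              + ", under-replicated in open files: "
              + status.getUnderReplicatedInOpenFiles());
        }
      } finally {
        cluster.getNamesystem().readUnlock();
      }
    }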


     
      // shutdown half the nodes and force a heartbeat check to ensure
      // counts are accurate
      for (int i=0; i < nodes/2; i++) {
        DataNode dn = datanodes.get(i);
        DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
        dn.shutdown();
        dnd.setLastUpdate(0L);
        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
        expectedInServiceNodes--;
        assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      }

      // restart the nodes to verify that counts are correct after
      // node re-registration
      cluster.restartDataNodes();
      cluster.waitActive();
      datanodes = cluster.getDataNodes();
      expectedInServiceNodes = nodes;
      assertEquals(nodes, datanodes.size());
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes, namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);
     
      // create streams and hsync to force datastreamers to start
      DFSOutputStream[] streams = new DFSOutputStream[fileCount];
      for (int i=0; i < fileCount; i++) {
        streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
            .getWrappedStream();
        streams[i].write("1".getBytes());
        streams[i].hsync();
        // the load for writers is 2 because both the write xceiver & packet
        // responder threads are counted in the load
        expectedTotalLoad += 2*fileRepl;
        expectedInServiceLoad += 2*fileRepl;
      }
      // force nodes to send load update
      triggerHeartbeats(datanodes);
      assertEquals(nodes, namesystem.getNumLiveDataNodes());
      assertEquals(expectedInServiceNodes,
          namesystem.getNumDatanodesInService());
      assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
      assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
          namesystem.getInServiceXceiverAverage(), EPSILON);

      // decomm a few nodes, subtract their load from the expected load,
      // trigger heartbeat to force load update
      for (int i=0; i < fileRepl; i++) {
        expectedInServiceNodes--;
        DatanodeDescriptor dnd =
            dnm.getDatanode(datanodes.get(i).getDatanodeId());
        expectedInServiceLoad -= dnd.getXceiverCount();
        dnm.startDecommission(dnd);
        DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
        Thread.sleep(100);
        assertEquals(nodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes,
            namesystem.getNumDatanodesInService());
        assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
        assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
            namesystem.getInServiceXceiverAverage(), EPSILON);
      }
     
      // check expected load while closing each stream.  recalc expected
      // load based on whether the nodes in the pipeline are decomm
      for (int i=0; i < fileCount; i++) {
        int decomm = 0;
        for (DatanodeInfo dni : streams[i].getPipeline()) {
          DatanodeDescriptor dnd = dnm.getDatanode(dni);
          expectedTotalLoad -= 2;
          if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
            decomm++;
          } else {
            expectedInServiceLoad -= 2;
          }
        }
        try {
          streams[i].close();
        } catch (IOException ioe) {
          // Nodes can finish decommissioning even when they hold an
          // under-construction block whose other locations are also
          // decommissioned; we ignore that bug for now.
          if (decomm < fileRepl) {
            throw ioe;
          }
        }
        triggerHeartbeats(datanodes);
        // verify node count and loads
        assertEquals(nodes, namesystem.getNumLiveDataNodes());
        assertEquals(expectedInServiceNodes,
            namesystem.getNumDatanodesInService());
        assertEquals(expectedTotalLoad, namesystem.getTotalLoad());
        assertEquals((double)expectedInServiceLoad/expectedInServiceNodes,
            namesystem.getInServiceXceiverAverage(), EPSILON);
      }

      // shutdown each node, verify node counts based on decomm state
      for (int i=0; i < nodes; i++) {
        DataNode dn = datanodes.get(i);
        dn.shutdown();
        // force it to appear dead so live count decreases
        DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
        dnDesc.setLastUpdate(0L);
        BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
        assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
        // first few nodes are already out of service
        if (i >= fileRepl) {
          expectedInServiceNodes--;
View Full Code Here
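The test above twice forces a stopped DataNode to register as dead immediately: it zeroes the descriptor's last-heartbeat timestamp and then runs the heartbeat check so the live count drops without waiting for the expiry interval. A small helper capturing that pattern, a sketch under the assumptions of this test file (the helper name is invented):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

    // Shut a node down and make the namesystem notice right away.
    static void forceNodeDead(FSNamesystem namesystem, DatanodeManager dnm,
        DataNode dn) throws IOException {
      dn.shutdown();
      DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
      dnd.setLastUpdate(0L); // stale timestamp: the monitor treats it as dead
      BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
    }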

      throws LeaseExpiredException, NotReplicatedYetException,
      QuotaExceededException, SafeModeException, UnresolvedLinkException,
      IOException {
    long blockSize;
    int replication;
    DatanodeDescriptor clientNode = null;

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* NameSystem.getAdditionalBlock: "
          + src + " inodeId " +  fileId  + " for " + clientName);
    }
View Full Code Here
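The isDebugEnabled() guard above exists only to skip the string concatenation when debug logging is off. As an aside, not from this file: with a parameterized logging API such as slf4j, the same statement needs no guard, since the message is formatted only when the level is enabled.

    // Hypothetical slf4j equivalent; NameNode.stateChangeLog itself is a
    // commons-logging Log in this codebase, so the guard above is needed.
    LOG.debug("BLOCK* NameSystem.getAdditionalBlock: {} inodeId {} for {}",
        src, fileId, clientName);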

      final int numAdditionalNodes, final String clientName
      ) throws IOException {
    //check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();

    final DatanodeDescriptor clientnode;
    final long preferredblocksize;
    final List<DatanodeStorageInfo> chosen;
    checkOperation(OperationCategory.READ);
    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
    readLock();
View Full Code Here

        ArrayList<String> trimmedStorages =
            new ArrayList<String>(newtargets.length);
        if (newtargets.length > 0) {
          for (int i = 0; i < newtargets.length; ++i) {
            // try to get targetNode
            DatanodeDescriptor targetNode =
                blockManager.getDatanodeManager().getDatanode(newtargets[i]);
            if (targetNode != null) {
              trimmedTargets.add(targetNode);
              trimmedStorages.add(newtargetstorages[i]);
            } else if (LOG.isDebugEnabled()) {
View Full Code Here

    DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "datanode1",
        1234, 2345, 3456, 4567);
    DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "datanode2",
        1235, 2346, 3457, 4568);

    // Setup DatanodeDescriptors with one storage each.
    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1");
    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2");

    // Update the DatanodeDescriptors with their attached storages.
    BlockManagerTestUtil.updateStorage(dnDesc1, new DatanodeStorage("dnStorage1"));
    BlockManagerTestUtil.updateStorage(dnDesc2, new DatanodeStorage("dnStorage2"));

    DatanodeStorage dns1 = new DatanodeStorage("dnStorage1");
    DatanodeStorage dns2 = new DatanodeStorage("dnStorage2");

    StorageReport[] report1 = new StorageReport[] {
        new StorageReport(dns1, false, 1024, 100, 924, 100)
    };
    StorageReport[] report2 = new StorageReport[] {
        new StorageReport(dns2, false, 2500, 200, 1848, 200)
    };
    dnDesc1.updateHeartbeat(report1, 5L, 3L, 10, 2);
    dnDesc2.updateHeartbeat(report2, 10L, 2L, 20, 1);

    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    live.add(dnDesc1);
    live.add(dnDesc2);
     
View Full Code Here
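The numbers passed to updateHeartbeat() above feed the same load metrics the long test earlier asserts on: the fourth argument (10 and 20 here) is what getXceiverCount() later returns. The sketch below is a minimal illustration, not the namesystem's actual code path, of how an in-service average like getInServiceXceiverAverage() falls out of such descriptors.

    import java.util.List;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    // Average per-node xceiver load over nodes still in service
    // (neither decommissioning nor decommissioned).
    static double inServiceXceiverAverage(List<DatanodeDescriptor> live) {
      int load = 0;
      int inService = 0;
      for (DatanodeDescriptor d : live) {
        if (!d.isDecommissionInProgress() && !d.isDecommissioned()) {
          load += d.getXceiverCount();
          inService++;
        }
      }
      return inService == 0 ? 0.0 : (double) load / inService;
    }

For dnDesc1 and dnDesc2 above, that is (10 + 20) / 2 = 15.0.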

        doc.startTag("replicas");
        for(final Iterator<DatanodeDescriptor> it = blockManager.datanodeIterator(block);
            it.hasNext(); ) {
          doc.startTag("replica");

          DatanodeDescriptor dd = it.next();

          doc.startTag("host_name");
          doc.pcdata(dd.getHostName());
          doc.endTag();

          boolean isCorrupt = blockManager.getCorruptReplicaBlockIds(0,
                                block.getBlockId()) != null;
         
View Full Code Here

                + " because the file exists");
          }
        }
      }

      final DatanodeDescriptor clientNode =
          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);

      if (append && myFile != null) {
        return prepareFileForWrite(
            src, myFile, holder, clientMachine, clientNode, true);
View Full Code Here
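getDatanodeByHost() maps the client machine to its co-located DatanodeDescriptor, or returns null for an off-cluster client; block placement later uses this node to try to put the first replica on the writer's own machine. A short hedged sketch in the same method context (the address is made up):

    // null means the writer does not run on a datanode; placement then
    // falls back to choosing a random first node.
    DatanodeDescriptor local =
        blockManager.getDatanodeManager().getDatanodeByHost("198.51.100.7");
    boolean writerIsLocal = (local != null);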

      IOException {
    checkBlock(previous);
    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
    long fileLength, blockSize;
    int replication;
    DatanodeDescriptor clientNode = null;
    Block newBlock = null;

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug(
          "BLOCK* NameSystem.getAdditionalBlock: file "
          +src+" for "+clientName);
    }

    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);

      if (isInSafeMode()) {
        throw new SafeModeException("Cannot add block to " + src, safeMode);
      }

      // have we exceeded the configured limit of fs objects.
      checkFsObjectLimit();

      INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
      BlockInfo lastBlockInFile = pendingFile.getLastBlock();
      if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
        // The block that the client claims is the current last block
        // doesn't match up with what we think is the last block. There are
        // three possibilities:
        // 1) This is the first block allocation of an append() pipeline
        //    which started appending exactly at a block boundary.
        //    In this case, the client isn't passed the previous block,
        //    so it makes the allocateBlock() call with previous=null.
        //    We can distinguish this since the last block of the file
        //    will be exactly a full block.
        // 2) This is a retry from a client that missed the response of a
        //    prior getAdditionalBlock() call, perhaps because of a network
        //    timeout, or because of an HA failover. In that case, we know
        //    by the fact that the client is re-issuing the RPC that it
        //    never began to write to the old block. Hence it is safe to
        //    abandon it and allocate a new one.
        // 3) This is an entirely bogus request/bug -- we should error out
        //    rather than potentially appending a new block with an empty
        //    one in the middle, etc

        BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
        if (previous == null &&
            lastBlockInFile != null &&
            lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
            lastBlockInFile.isComplete()) {
          // Case 1
          if (NameNode.stateChangeLog.isDebugEnabled()) {
             NameNode.stateChangeLog.debug(
                 "BLOCK* NameSystem.allocateBlock: handling block allocation" +
                 " writing to a file with a complete previous block: src=" +
                 src + " lastBlock=" + lastBlockInFile);
          }
        } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
          // Case 2
          if (lastBlockInFile.getNumBytes() != 0) {
            throw new IOException(
                "Request looked like a retry to allocate block " +
                lastBlockInFile + " but it already contains " +
                lastBlockInFile.getNumBytes() + " bytes");
          }

          // The retry case ("b" above) -- abandon the old block.
          NameNode.stateChangeLog.info("BLOCK* NameSystem.allocateBlock: " +
              "caught retry for allocation of a new block in " +
              src + ". Abandoning old block " + lastBlockInFile);
          dir.removeBlock(src, pendingFile, lastBlockInFile);
          dir.persistBlocks(src, pendingFile);
        } else {
         
          throw new IOException("Cannot allocate block in " + src + ": " +
              "passed 'previous' block " + previous + " does not match actual " +
              "last block in file " + lastBlockInFile);
        }
      }

      // commit the last block and complete it if it has minimum replicas
      commitOrCompleteLastBlock(pendingFile, previousBlock);

      // Check that the penultimate block is complete and minimally
      // replicated; if we fail this, bad things happen!
      if (!checkFileProgress(pendingFile, false)) {
        throw new NotReplicatedYetException("Not replicated yet:" + src);
      }
      fileLength = pendingFile.computeContentSummary().getLength();
      blockSize = pendingFile.getPreferredBlockSize();
      clientNode = pendingFile.getClientNode();
      replication = pendingFile.getReplication();
    } finally {
      writeUnlock();
    }

    // choose targets for the new block to be allocated.
    final DatanodeDescriptor targets[] = blockManager.chooseTarget(
        src, replication, clientNode, excludedNodes, blockSize);

    // Allocate a new block and record it in the INode.
    writeLock();
    try {
View Full Code Here
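The three-way comment block above is the heart of this method. Below is a schematic restatement as a predicate, purely illustrative: the enum and helper are hypothetical, not part of FSNamesystem, and take the already-unwrapped local Block. (The real code additionally verifies, in case 2, that the block being retried is still empty before abandoning it.)

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;

    // Classify the client's 'previous' block against namesystem state.
    enum PreviousBlockCase { APPEND_AT_BLOCK_BOUNDARY, RPC_RETRY, BOGUS }

    static PreviousBlockCase classify(Block previous, BlockInfo lastBlockInFile,
        BlockInfo penultimateBlock, long preferredBlockSize) {
      if (previous == null && lastBlockInFile != null
          && lastBlockInFile.getNumBytes() == preferredBlockSize
          && lastBlockInFile.isComplete()) {
        return PreviousBlockCase.APPEND_AT_BLOCK_BOUNDARY; // case 1
      }
      if (Block.matchingIdAndGenStamp(penultimateBlock, previous)) {
        return PreviousBlockCase.RPC_RETRY;                // case 2
      }
      return PreviousBlockCase.BOGUS;                      // case 3
    }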

      final int numAdditionalNodes, final String clientName
      ) throws IOException {
    //check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();

    final DatanodeDescriptor clientnode;
    final long preferredblocksize;
    final List<DatanodeDescriptor> chosen;
    readLock();
    try {
      checkOperation(OperationCategory.WRITE);
      //check safe mode
      if (isInSafeMode()) {
        throw new SafeModeException("Cannot add datanode; src=" + src
            + ", blk=" + blk, safeMode);
      }

      //check lease
      final INodeFileUnderConstruction file = checkLease(src, clientName);
      clientnode = file.getClientNode();
      preferredblocksize = file.getPreferredBlockSize();

      //find datanode descriptors
      chosen = new ArrayList<DatanodeDescriptor>();
      for(DatanodeInfo d : existings) {
        final DatanodeDescriptor descriptor = blockManager.getDatanodeManager(
            ).getDatanode(d);
        if (descriptor != null) {
          chosen.add(descriptor);
        }
      }
View Full Code Here
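Only descriptors that are still registered make it into chosen; unknown nodes are silently skipped. Separately, and purely as a hypothetical illustration of the public decommission predicates that tie back to DecommissioningStatus (as far as this fragment shows, the real method passes chosen on unfiltered):

    // Drop replicas that are already leaving service before counting how
    // many usable sources remain. Illustrative only; uses 'chosen' from
    // the fragment above.
    List<DatanodeDescriptor> usable = new ArrayList<DatanodeDescriptor>();
    for (DatanodeDescriptor d : chosen) {
      if (!d.isDecommissionInProgress() && !d.isDecommissioned()) {
        usable.add(d);
      }
    }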
