Package org.apache.hadoop.hdfs.server.blockmanagement

Examples of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor$CachedBlocksList
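DatanodeDescriptor is the NameNode's in-memory record of a single datanode: its addresses and ports, rack location, last heartbeat time, and the per-node block work queues. Its inner class CachedBlocksList belongs to the HDFS centralized caching feature: each descriptor keeps three such lists (pending cached, cached, and pending uncached) of CachedBlock entries, which drive the cache and uncache commands sent to that node.

The excerpts below exercise the enclosing DatanodeDescriptor rather than the list itself, so here is a minimal sketch of reading a CachedBlocksList. It assumes the Hadoop 2.x caching internals (the getPendingCached() accessor and CachedBlocksList.getType()); these are private NameNode APIs and may differ between releases.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.namenode.CachedBlock;

public class CachedBlocksListSketch {
  public static void main(String[] args) {
    // Build a descriptor for a datanode on the local host, using the same
    // DFSTestUtil helper shown in the excerpts below.
    DatanodeDescriptor dn = DFSTestUtil.getLocalDatanodeDescriptor();

    // Each descriptor keeps three lists; getPendingCached() holds the
    // blocks the NameNode still wants this node to cache.
    CachedBlocksList pending = dn.getPendingCached();
    System.out.println("list type: " + pending.getType());

    // CachedBlocksList is an intrusive collection of CachedBlock entries,
    // so it iterates like any java.util.Collection.
    for (CachedBlock block : pending) {
      System.out.println("waiting to be cached: " + block);
    }
  }
}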


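  // From DFSTestUtil (HDFS test helpers): build a DatanodeID for a datanode
  // on the local host, reusing one port number for the xfer, info,
  // secure-info, and IPC slots of the 7-argument DatanodeID constructor.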
  public static DatanodeID getLocalDatanodeID(int port) {
    return new DatanodeID("127.0.0.1", "localhost", "",
        port, port, port, port);
  }

  public static DatanodeDescriptor getLocalDatanodeDescriptor() {
    return new DatanodeDescriptor(getLocalDatanodeID());
  }


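  // From DFSTestUtil: build a DatanodeDescriptor on a given rack, with an
  // explicit xfer port and the default HTTP, HTTPS, and IPC ports.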
  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
      int port, String rackLocation) {
    DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
    return new DatanodeDescriptor(dnId, rackLocation);
  }

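      // Excerpt, apparently from FSNamesystem's file-create/append path
      // (opening mid-way through a "file exists" rejection message): the
      // client's hostname is resolved to its DatanodeDescriptor so block
      // placement can favor the writer's local node.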
              + " on client " + clientMachine
              + " because the file exists");
        }
      }

      final DatanodeDescriptor clientNode =
          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);

      if (append && myFile != null) {
        return prepareFileForWrite(
            src, myFile, holder, clientMachine, clientNode, true);

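  // Excerpt from FSNamesystem#getAdditionalBlock (opening mid-way through
  // its throws clause): commit or complete the previous block, then choose
  // target datanodes for the next block of a file under construction.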
      QuotaExceededException, SafeModeException, UnresolvedLinkException,
      IOException {
    checkBlock(previous);
    long fileLength, blockSize;
    int replication;
    DatanodeDescriptor clientNode = null;
    Block newBlock = null;

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug(
          "BLOCK* NameSystem.getAdditionalBlock: file "
          +src+" for "+clientName);
    }

    writeLock();
    try {
      if (isInSafeMode()) {
        throw new SafeModeException("Cannot add block to " + src, safeMode);
      }

      // have we exceeded the configured limit of fs objects.
      checkFsObjectLimit();

      INodeFileUnderConstruction pendingFile = checkLease(src, clientName);

      // commit the last block and complete it if it has minimum replicas
      commitOrCompleteLastBlock(pendingFile, ExtendedBlock.getLocalBlock(previous));

      //
      // If we fail this, bad things happen!
      //
      if (!checkFileProgress(pendingFile, false)) {
        throw new NotReplicatedYetException("Not replicated yet:" + src);
      }
      fileLength = pendingFile.computeContentSummary().getLength();
      blockSize = pendingFile.getPreferredBlockSize();
      clientNode = pendingFile.getClientNode();
      replication = (int)pendingFile.getReplication();
    } finally {
      writeUnlock();
    }

    // choose targets for the new block to be allocated.
    final DatanodeDescriptor targets[] = blockManager.chooseTarget(
        src, replication, clientNode, excludedNodes, blockSize);

    // Allocate a new block and record it in the INode.
    writeLock();
    try {

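  // Excerpt, apparently from FSNamesystem#getAdditionalDatanode: map the
  // client's existing DatanodeInfo entries to live DatanodeDescriptors
  // before choosing a replacement node for a failed write pipeline.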
      final int numAdditionalNodes, final String clientName
      ) throws IOException {
    //check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();

    final DatanodeDescriptor clientnode;
    final long preferredblocksize;
    final List<DatanodeDescriptor> chosen;
    readLock();
    try {
      //check safe mode
      if (isInSafeMode()) {
        throw new SafeModeException("Cannot add datanode; src=" + src
            + ", blk=" + blk, safeMode);
      }

      //check lease
      final INodeFileUnderConstruction file = checkLease(src, clientName);
      clientnode = file.getClientNode();
      preferredblocksize = file.getPreferredBlockSize();

      //find datanode descriptors
      chosen = new ArrayList<DatanodeDescriptor>();
      for(DatanodeInfo d : existings) {
        final DatanodeDescriptor descriptor = blockManager.getDatanodeManager(
            ).getDatanode(d);
        if (descriptor != null) {
          chosen.add(descriptor);
        }
      }

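  // The same DFSTestUtil helper as above, from a revision whose DatanodeID
  // constructor takes six arguments (no secure-info port).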
  public static DatanodeID getLocalDatanodeID(int port) {
    return new DatanodeID("127.0.0.1", "localhost", "",
        port, port, port);
  }

  public static DatanodeDescriptor getLocalDatanodeDescriptor() {
    return new DatanodeDescriptor(getLocalDatanodeID());
  }

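  // The same rack-aware helper as above, from a revision without the HTTPS
  // port argument.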
  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
      int port, String rackLocation) {
    DatanodeID dnId = new DatanodeID(ipAddr, "host", "", port,
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
    return new DatanodeDescriptor(dnId, rackLocation);
  }

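      // Excerpt, apparently from an HA pipeline-recovery test such as
      // TestPipelinesFailover: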
      // Look into the block manager on the active node for the block
      // under construction.
     
      NameNode nn0 = cluster.getNameNode(0);
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
      DatanodeDescriptor expectedPrimary = getExpectedPrimaryNode(nn0, blk);
      LOG.info("Expecting block recovery to be triggered on DN " +
          expectedPrimary);
     
      // Find the corresponding DN daemon, and spy on its connection to the
      // active.
      DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
      DatanodeProtocolClientSideTranslatorPB nnSpy =
          DataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
     
      // Delay the commitBlockSynchronization call
      DelayAnswer delayer = new DelayAnswer(LOG);

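    // Test helper: scan an under-construction block's expected locations
    // for the replica with the most recent heartbeat.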
    BlockInfoUnderConstruction ucBlock =
      (BlockInfoUnderConstruction)storedBlock;
    // We expect that the replica with the most recent heart beat will be
    // the one to be in charge of the synchronization / recovery protocol.
    DatanodeDescriptor[] datanodes = ucBlock.getExpectedLocations();
    DatanodeDescriptor expectedPrimary = datanodes[0];
    long mostRecentLastUpdate = expectedPrimary.getLastUpdate();
    for (int i = 1; i < datanodes.length; i++) {
      if (datanodes[i].getLastUpdate() > mostRecentLastUpdate) {
        expectedPrimary = datanodes[i];
        mostRecentLastUpdate = expectedPrimary.getLastUpdate();
      }
    }
    return expectedPrimary;
  }

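  // Excerpt, apparently from TestNetworkTopology: registered datanodes are
  // contained in the cluster map; an unregistered descriptor is not.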
  @Test
  public void testContains() throws Exception {
    DatanodeDescriptor nodeNotInMap =
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
    for (int i=0; i < dataNodes.length; i++) {
      assertTrue(cluster.contains(dataNodes[i]));
    }
    assertFalse(cluster.contains(nodeNotInMap));
