Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeInfo

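DatanodeInfo extends DatanodeID with live status about a datanode: capacity, DFS usage, remaining space, block-pool usage, last-update time, xceiver count, network location, and admin state. The snippets below show how HDFS client code, protobuf translators, and server code consume it.

As a starting point, a minimal sketch of obtaining DatanodeInfo objects from a running cluster (an assumption-laden example, not taken from the snippets below: it assumes the default Configuration points at a live HDFS, and uses org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, and org.apache.hadoop.hdfs.DistributedFileSystem#getDataNodeStats):

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // one DatanodeInfo per datanode known to the namenode
      for (DatanodeInfo dn : dfs.getDataNodeStats()) {
        System.out.println(dn.getName() + " remaining=" + dn.getRemaining());
      }
    }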

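From DFSInputStream's read-retry loop: refresh the cached block locations, choose a datanode, and prepare to open a BlockReader against it.
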
      // cached block locations may have been updated by chooseDataNode()
      // or fetchBlockAt(). Always get the latest list of locations at the
      // start of the loop.
      block = getBlockAt(block.getStartOffset(), false);
      DNAddrPair retval = chooseDataNode(block);
      DatanodeInfo chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;
      BlockReader reader = null;
         
      try {
        Token<BlockTokenIdentifier> blockToken = block.getBlockToken();
        // ...


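DFSInputStream#seekToNewSource marks the current datanode dead, re-seeks, and compares storage IDs to decide whether a genuinely different node was found.
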
  /**
   * Seek to given position on a node other than the current node. If
   * a node other than the current node is found, then returns true.
   * If another node could not be found, then returns false.
   */
  @Override
  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
    boolean markedDead = deadNodes.containsKey(currentNode);
    addToDeadNodes(currentNode);
    DatanodeInfo oldNode = currentNode;
    DatanodeInfo newNode = blockSeekTo(targetPos);
    if (!markedDead) {
      /* remove it from deadNodes. blockSeekTo could have cleared
       * deadNodes and added currentNode again. That's OK. */
      deadNodes.remove(oldNode);
    }
    if (!oldNode.getStorageID().equals(newNode.getStorageID())) {
      currentNode = newNode;
      return true;
    } else {
      return false;
    }
  }

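The server-side protobuf translator for NamenodeProtocol#getBlocks converts the requested datanode from its wire form before delegating to the implementation.
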
  @Override
  public GetBlocksResponseProto getBlocks(RpcController unused,
      GetBlocksRequestProto request) throws ServiceException {
    DatanodeInfo dnInfo = new DatanodeInfo(
        PBHelper.convert(request.getDatanode()));
    BlocksWithLocations blocks;
    try {
      blocks = impl.getBlocks(dnInfo, request.getSize());
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetBlocksResponseProto.newBuilder()
        .setBlocks(PBHelper.convert(blocks)).build();
  }
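
PBHelper.convert rebuilds a DatanodeInfo from its protobuf wire form, field by field.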
 
  public static DatanodeInfo convert(DatanodeInfoProto di) {
    if (di == null) return null;
    return new DatanodeInfo(
        PBHelper.convert(di.getId()),
        di.hasLocation() ? di.getLocation() : null,
        di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
        di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
        PBHelper.convert(di.getAdminState()));
  }
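
The conversion is symmetric, so a round trip through the wire form yields an equivalent object (a sketch; info is a hypothetical DatanodeInfo already in scope):

    DatanodeInfoProto proto = PBHelper.convert(info);  // POJO -> protobuf
    DatanodeInfo roundTrip = PBHelper.convert(proto);  // protobuf -> POJO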

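bestNode, apparently from the HDFS JSP helper utilities, probes candidate datanodes over TCP and returns the first one that accepts a connection, tracking dead nodes as it goes.
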
  public static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom,
      Configuration conf) throws IOException {
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    DatanodeInfo chosenNode = null;
    int failures = 0;
    Socket s = null;
    int index = -1;
    if (nodes == null || nodes.length == 0) {
      throw new IOException("No nodes contain this block");
    }
    while (s == null) {
      if (chosenNode == null) {
        do {
          if (doRandom) {
            index = DFSUtil.getRandom().nextInt(nodes.length);
          } else {
            index++;
          }
          chosenNode = nodes[index];
        } while (deadNodes.contains(chosenNode));
      }

      //just ping to check whether the node is alive
      InetSocketAddress targetAddr = NetUtils.createSocketAddr(
          chosenNode.getInfoAddr());
       
      try {
        s = NetUtils.getDefaultSocketFactory(conf).createSocket();
        s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
      } catch (IOException e) {
        // mark the node dead and force a new choice on the next iteration
        deadNodes.add(chosenNode);
        chosenNode = null;
        IOUtils.closeSocket(s);
        s = null;
        failures++;
      }
      if (failures == nodes.length) {
        throw new IOException(
            "Could not reach the block containing the data. Please try again");
      }
    }
    s.close();
    return chosenNode;
  }

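NameNodeRpcServer implements ClientProtocol#getDatanodeReport by delegating to the namesystem's datanode report.
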
  @Override // ClientProtocol
  public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
      throws IOException {
    namesystem.checkOperation(OperationCategory.UNCHECKED);
    DatanodeInfo[] results = namesystem.datanodeReport(type);
    if (results == null) {
      throw new IOException("Cannot find datanode report");
    }
    return results;
  }

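On the client side the same report is reachable through DFSClient#datanodeReport (a sketch with hypothetical setup; assumes conf points at the cluster and uses the era-appropriate DFSClient constructor):

    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    for (DatanodeInfo dn : client.datanodeReport(DatanodeReportType.LIVE)) {
      System.out.println(dn.getDatanodeReport());  // formatted status block
    }
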
 
  /* Shuffle the datanode array in place (Fisher-Yates) */
  private static void shuffleArray(DatanodeInfo[] datanodes) {
    for (int i = datanodes.length; i > 1; i--) {
      int randomIndex = DFSUtil.getRandom().nextInt(i);
      DatanodeInfo tmp = datanodes[randomIndex];
      datanodes[randomIndex] = datanodes[i - 1];
      datanodes[i - 1] = tmp;
    }
  }

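From the DFSOutputStream pipeline-recovery path: after finding a replacement datanode, copy a replica to it from an adjacent node in the pipeline.
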
      //find the new datanode
      final int d = findNewDatanode(original);

      //transfer replica
      final DatanodeInfo src = d == 0 ? nodes[1] : nodes[d - 1];
      final DatanodeInfo[] targets = {nodes[d]};
      transfer(src, targets, lb.getBlockToken());
    }

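In the Balancer, chooseProxySource prefers a proxy replica from the same node group as the target when the topology is node-group aware.
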
    /* Now that we know the source, target, and block, we need to find a proxy.
     *
     * @return true if a proxy is found; otherwise false
     */
    private boolean chooseProxySource() {
      final DatanodeInfo targetDN = target.getDatanode();
      // if node group is supported, first try add nodes in the same node group
      if (cluster.isNodeGroupAware()) {
        for (BalancerDatanode loc : block.getLocations()) {
          if (cluster.isOnSameNodeGroup(loc.getDatanode(), targetDN) && addTo(loc)) {
            return true;
          }
        }
      }
      // ...

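NNThroughputBenchmark simulates block transfers by reporting, on behalf of each target datanode, that the blocks were received.
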
    /**
     * Transfer blocks to another data-node.
     * Just report on behalf of the other data-node
     * that the blocks have been received.
     */
    private int transferBlocks(Block[] blocks, DatanodeInfo[][] xferTargets)
        throws IOException {
      for (int i = 0; i < blocks.length; i++) {
        DatanodeInfo[] blockTargets = xferTargets[i];
        for (int t = 0; t < blockTargets.length; t++) {
          DatanodeInfo dnInfo = blockTargets[t];
          DatanodeRegistration receivedDNReg =
              new DatanodeRegistration(dnInfo.getName());
          receivedDNReg.setStorageInfo(
              new DataStorage(nsInfo, dnInfo.getStorageID()));
          receivedDNReg.setInfoPort(dnInfo.getInfoPort());
          nameNode.blockReceived(receivedDNReg,
              new Block[] {blocks[i]},
              new String[] {DataNode.EMPTY_DEL_HINT});
        }
      }
      return blocks.length;
    }
