Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeInfo
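
DatanodeInfo extends DatanodeID with the status a client can see about a datanode: capacity, space used and remaining, last-contact time, network location, and admin state. The excerpts below show how it is created on the NameNode, shipped to clients, and used for replica selection and bad-block reporting.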


      final DatanodeManager dm = getBlockManager().getDatanodeManager();     
      final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

      DatanodeInfo[] arr = new DatanodeInfo[results.size()];
      for (int i=0; i<arr.length; i++) {
        arr[i] = new DatanodeInfo(results.get(i));
      }
      return arr;
    } finally {
      readUnlock();
    }
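
This excerpt is NameNode-side (it appears to be FSNamesystem#datanodeReport): each internal DatanodeDescriptor is copied into a client-facing DatanodeInfo while the namesystem read lock is held, which is why the array construction sits inside a try/finally around readUnlock().

A minimal client-side sketch of consuming such a report, assuming fs.defaultFS points at an HDFS cluster; DistributedFileSystem#getDataNodeStats() returns a DatanodeInfo[] like the one built above:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

  public class DatanodeReportExample {
    public static void main(String[] args) throws Exception {
      // Assumes the default filesystem is HDFS; the cast fails otherwise.
      FileSystem fs = FileSystem.get(new Configuration());
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (DatanodeInfo dn : dfs.getDataNodeStats()) {
        // Print a few of the status fields DatanodeInfo carries.
        System.out.println(dn.getHostName() + " capacity=" + dn.getCapacity()
            + " remaining=" + dn.getRemaining());
      }
    }
  }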


      NameNode.stateChangeLog.info("*DIR* NameNode.reportBadBlocks");
      for (int i = 0; i < blocks.length; i++) {
        ExtendedBlock blk = blocks[i].getBlock();
        DatanodeInfo[] nodes = blocks[i].getLocations();
        for (int j = 0; j < nodes.length; j++) {
          DatanodeInfo dn = nodes[j];
          blockManager.findAndMarkBlockAsCorrupt(blk, dn,
              "client machine reported it");
        }
      }
    } finally {
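
This is the NameNode's handling of client bad-block reports (apparently NameNodeRpcServer#reportBadBlocks): each LocatedBlock pairs an ExtendedBlock with the DatanodeInfo[] of its replica locations, and every (block, datanode) pair is marked corrupt through the BlockManager.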

  void reportBadBlocks(ExtendedBlock block) {
    if (bpRegistration == null) {
      return;
    }
    DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
    LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };
   
    try {
      bpNamenode.reportBadBlocks(blocks);
    } catch (IOException e) {
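
Here the reporting direction is reversed: a DataNode (this looks like BPOfferService#reportBadBlocks) wraps its own registration in a single-element DatanodeInfo[], builds a one-entry LocatedBlock[], and sends it to the NameNode; if it has not registered yet (bpRegistration == null), the report is silently dropped.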

      final UserParam username, final DoAsParam doAsUser,
      final String path, final HttpOpParam.Op op, final long openOffset,
      final long blocksize,
      final Param<?, ?>... parameters) throws URISyntaxException, IOException {
    final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
    final DatanodeInfo dn = chooseDatanode(namenode, path, op, openOffset,
        blocksize, conf);

    final String delegationQuery;
    if (!UserGroupInformation.isSecurityEnabled()) {
      //security disabled
      delegationQuery = Param.toSortedString("&", doAsUser, username);
    } else if (delegation.getValue() != null) {
      //client has provided a token
      delegationQuery = "&" + delegation;
    } else {
      //generate a token
      final Token<? extends TokenIdentifier> t = generateDelegationToken(
          namenode, ugi, request.getUserPrincipal().getName());
      delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
    }
    final String query = op.toQueryString() + delegationQuery
        + "&" + new NamenodeRpcAddressParam(namenode)
        + Param.toSortedString("&", parameters);
    final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;

    final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
        uripath, query, null);
    if (LOG.isTraceEnabled()) {
      LOG.trace("redirectURI=" + uri);
    }
    return uri;
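
This is the WebHDFS redirect path (it appears to be NamenodeWebHdfsMethods): the NameNode chooses a DatanodeInfo for the operation, assembles the delegation-token and caller-supplied query parameters, and redirects the client to that datanode's HTTP info port. The java.net.URI constructor used above takes (scheme, userInfo, host, port, path, query, fragment); a standalone sketch, where the host, port, path, and query strings are placeholders rather than values from a live cluster:

  import java.net.URI;

  public class RedirectUriExample {
    public static void main(String[] args) throws Exception {
      // Placeholder values, shaped like a WebHDFS redirect target.
      URI uri = new URI("http", null, "dn1.example.com", 50075,
          "/webhdfs/v1/user/alice/file", "op=OPEN&offset=0", null);
      System.out.println(uri);
    }
  }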

    // otherwise pick one with least space from priSet if it is not empty
    // otherwise one node with least space from remains
    boolean firstOne = true;
    while (nonExcess.size() - replication > 0) {
      // check if we can delete delNodeHint
      final DatanodeInfo cur;
      if (firstOne && delNodeHint != null && nonExcess.contains(delNodeHint)
          && (priSet.contains(delNodeHint)
              || (addedNode != null && !priSet.contains(addedNode))) ) {
        cur = delNodeHint;
      } else { // regular excessive replica removal
        cur = replicator.chooseReplicaToDelete(bc, b, replication,
            priSet, remains);
      }
      firstOne = false;

      // adjust rackmap, priSet, and remains
      String rack = cur.getNetworkLocation();
      final List<DatanodeDescriptor> datanodes = rackMap.get(rack);
      datanodes.remove(cur);
      if (datanodes.isEmpty()) {
        rackMap.remove(rack);
      }
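
In excess-replica removal, a DatanodeInfo identifies the replica to delete: the caller's delNodeHint is honored once if it is actually deletable, otherwise the block placement policy picks from the priority set or the remainder; the chosen node's getNetworkLocation() then keys the per-rack bookkeeping so emptied racks drop out of rackMap.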

    List<VolumeBlockLocationCallable> callables =
        new ArrayList<VolumeBlockLocationCallable>();
    for (Map.Entry<DatanodeInfo, List<LocatedBlock>> entry : datanodeBlocks
        .entrySet()) {
      // Construct RPC parameters
      DatanodeInfo datanode = entry.getKey();
      List<LocatedBlock> locatedBlocks = entry.getValue();
      List<ExtendedBlock> extendedBlocks =
          new ArrayList<ExtendedBlock>(locatedBlocks.size());
      List<Token<BlockTokenIdentifier>> dnTokens =
          new ArrayList<Token<BlockTokenIdentifier>>(
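
This excerpt prepares one RPC per datanode: located blocks are grouped by DatanodeInfo, and for each datanode the ExtendedBlocks and their BlockTokenIdentifier tokens are collected into a VolumeBlockLocationCallable.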

      try {
        HdfsBlocksMetadata metadata = future.get();
        metadatas.set(i, metadata);
      } catch (ExecutionException e) {
        VolumeBlockLocationCallable callable = callables.get(i);
        DatanodeInfo datanode = callable.getDatanodeInfo();
        Throwable t = e.getCause();
        if (t instanceof InvalidBlockTokenException) {
          LOG.warn("Invalid access token when trying to retrieve "
              + "information from datanode " + datanode.getIpcAddr(false));
          throw (InvalidBlockTokenException) t;
        }
        else if (t instanceof UnsupportedOperationException) {
          LOG.info("Datanode " + datanode.getIpcAddr(false) + " does not support"
              + " required #getHdfsBlocksMetadata() API");
          throw (UnsupportedOperationException) t;
        } else {
          LOG.info("Failed to connect to datanode " +
              datanode.getIpcAddr(false));
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Could not fetch information from datanode", t);
        }
      } catch (InterruptedException e) {
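
Per-datanode error handling for those calls: an InvalidBlockTokenException and an UnsupportedOperationException (a datanode without the getHdfsBlocksMetadata() API) are rethrown, while ordinary connection failures are only logged; in every case the callable's DatanodeInfo supplies the address for the log message via getIpcAddr(false).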

    // into the Map returned to the caller
    Iterator<HdfsBlocksMetadata> metadatasIter = metadatas.iterator();
    Iterator<DatanodeInfo> datanodeIter = datanodeBlocks.keySet().iterator();
    while (metadatasIter.hasNext()) {
      HdfsBlocksMetadata metadata = metadatasIter.next();
      DatanodeInfo datanode = datanodeIter.next();
      // Check if metadata is valid
      if (metadata == null) {
        continue;
      }
      ExtendedBlock[] metaBlocks = metadata.getBlocks();
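
The metadata results are then matched back to their datanodes by iterating the result list and datanodeBlocks.keySet() in parallel, which relies on the map preserving the iteration order used when the callables were created; null entries (datanodes that returned nothing usable) are skipped.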

    //
    // Connect to best DataNode for desired Block, with potential offset
    //
    DatanodeInfo chosenNode = null;
    int refetchToken = 1; // only need to get a new access token once
    int refetchEncryptionKey = 1; // only need to get a new encryption key once
   
    boolean connectFailedOnce = false;
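
This fragment (apparently from DFSInputStream) initializes the connect loop's state: the DatanodeInfo eventually chosen, plus one-shot retry budgets for refetching an expired access token and a stale encryption key.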

  private DNAddrPair chooseDataNode(LocatedBlock block)
    throws IOException {
    while (true) {
      DatanodeInfo[] nodes = block.getLocations();
      try {
        DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
        final String dnAddr =
            chosenNode.getXferAddr(dfsClient.connectToDnViaHostname());
        if (DFSClient.LOG.isDebugEnabled()) {
          DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
        }
        InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
        return new DNAddrPair(chosenNode, targetAddr);
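
This looks like DFSInputStream#chooseDataNode: bestNode() selects a replica location that is not in the client's deadNodes collection, and the node's transfer address (optionally by hostname, per connectToDnViaHostname()) is resolved to an InetSocketAddress. A hypothetical sketch of the kind of selection bestNode() performs; the class, method name, and Set-based signature here are illustrative, not HDFS's actual private API:

  import java.io.IOException;
  import java.util.Set;
  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

  public class ReplicaChooser {
    // Illustrative only: return the first replica location not known dead.
    static DatanodeInfo firstLiveNode(DatanodeInfo[] nodes,
        Set<DatanodeInfo> dead) throws IOException {
      for (DatanodeInfo dn : nodes) {
        if (!dead.contains(dn)) {
          return dn;
        }
      }
      throw new IOException("No live datanodes contain the block");
    }
  }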
