Package: org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata


      if (!isValid) {
        volumeIndex = Integer.MAX_VALUE;
      }
      blocksVolumendexes.add(volumeIndex);
    }
    return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
        blocksVolumeIds, blocksVolumendexes);
  }
View Full Code Here


  @Override
  public GetHdfsBlockLocationsResponseProto getHdfsBlockLocations(
      RpcController controller, GetHdfsBlockLocationsRequestProto request)
      throws ServiceException {
    HdfsBlocksMetadata resp;
    try {
      // Construct the Lists to make the actual call
      List<ExtendedBlock> blocks =
          new ArrayList<ExtendedBlock>(request.getBlocksCount());
      for (ExtendedBlockProto b : request.getBlocksList()) {
        blocks.add(PBHelper.convert(b));
      }
      List<Token<BlockTokenIdentifier>> tokens =
          new ArrayList<Token<BlockTokenIdentifier>>(request.getTokensCount());
      for (BlockTokenIdentifierProto b : request.getTokensList()) {
        tokens.add(PBHelper.convert(b));
      }
      // Call the real implementation
      resp = impl.getHdfsBlocksMetadata(blocks, tokens);
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    List<ByteString> volumeIdsByteStrings =
        new ArrayList<ByteString>(resp.getVolumeIds().size());
    for (byte[] b : resp.getVolumeIds()) {
      volumeIdsByteStrings.add(ByteString.copyFrom(b));
    }
    // Build and return the response
    Builder builder = GetHdfsBlockLocationsResponseProto.newBuilder();
    builder.addAllVolumeIds(volumeIdsByteStrings);
    builder.addAllVolumeIndexes(resp.getVolumeIndexes());
    return builder.build();
  }
View Full Code Here

    }
    // Fill in metadatas with results from DN RPCs, where possible
    for (int i = 0; i < futures.size(); i++) {
      Future<HdfsBlocksMetadata> future = futures.get(i);
      try {
        HdfsBlocksMetadata metadata = future.get();
        metadatas.set(i, metadata);
      } catch (ExecutionException e) {
        VolumeBlockLocationCallable callable = callables.get(i);
        DatanodeInfo datanode = callable.getDatanodeInfo();
        Throwable t = e.getCause();
View Full Code Here

    // For each metadata, if it's valid, insert its volume location information
    // into the Map returned to the caller
    Iterator<HdfsBlocksMetadata> metadatasIter = metadatas.iterator();
    Iterator<DatanodeInfo> datanodeIter = datanodeBlocks.keySet().iterator();
    while (metadatasIter.hasNext()) {
      HdfsBlocksMetadata metadata = metadatasIter.next();
      DatanodeInfo datanode = datanodeIter.next();
      // Check if metadata is valid
      if (metadata == null) {
        continue;
      }
      ExtendedBlock[] metaBlocks = metadata.getBlocks();
      List<byte[]> metaVolumeIds = metadata.getVolumeIds();
      List<Integer> metaVolumeIndexes = metadata.getVolumeIndexes();
      // Add VolumeId for each replica in the HdfsBlocksMetadata
      for (int j = 0; j < metaBlocks.length; j++) {
        int volumeIndex = metaVolumeIndexes.get(j);
        ExtendedBlock extBlock = metaBlocks[j];
        // Skip if block wasn't found, or not a valid index into metaVolumeIds
View Full Code Here

      return datanode;
    }

    @Override
    public HdfsBlocksMetadata call() throws Exception {
      HdfsBlocksMetadata metadata = null;
      // Create the RPC proxy and make the RPC
      ClientDatanodeProtocol cdp = null;
      try {
        cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
            timeout, connectToDnViaHostname);
View Full Code Here

      volumeIds.add(bs.toByteArray());
    }
    // Array of indexes into the list of volumes, one per block
    List<Integer> volumeIndexes = response.getVolumeIndexesList();
    // Parsed HdfsVolumeId values, one per block
    return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
        volumeIds, volumeIndexes);
  }
View Full Code Here

    for (int i = 0; i < futures.size(); i++) {
      VolumeBlockLocationCallable callable = callables.get(i);
      DatanodeInfo datanode = callable.getDatanodeInfo();
      Future<HdfsBlocksMetadata> future = futures.get(i);
      try {
        HdfsBlocksMetadata metadata = future.get();
        metadatas.put(callable.getDatanodeInfo(), metadata);
      } catch (CancellationException e) {
        LOG.info("Cancelled while waiting for datanode "
            + datanode.getIpcAddr(false) + ": " + e.toString());
      } catch (ExecutionException e) {
View Full Code Here

    // Iterate through the list of metadatas (one per datanode).
    // For each metadata, if it's valid, insert its volume location information
    // into the Map returned to the caller
    for (Map.Entry<DatanodeInfo, HdfsBlocksMetadata> entry : metadatas.entrySet()) {
      DatanodeInfo datanode = entry.getKey();
      HdfsBlocksMetadata metadata = entry.getValue();
      // Check if metadata is valid
      if (metadata == null) {
        continue;
      }
      long[] metaBlockIds = metadata.getBlockIds();
      List<byte[]> metaVolumeIds = metadata.getVolumeIds();
      List<Integer> metaVolumeIndexes = metadata.getVolumeIndexes();
      // Add VolumeId for each replica in the HdfsBlocksMetadata
      for (int j = 0; j < metaBlockIds.length; j++) {
        int volumeIndex = metaVolumeIndexes.get(j);
        long blockId = metaBlockIds[j];
        // Skip if block wasn't found, or not a valid index into metaVolumeIds
View Full Code Here

      return datanode;
    }

    @Override
    public HdfsBlocksMetadata call() throws Exception {
      HdfsBlocksMetadata metadata = null;
      // Create the RPC proxy and make the RPC
      ClientDatanodeProtocol cdp = null;
      try {
        cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
            timeout, connectToDnViaHostname);
View Full Code Here

      if (!isValid) {
        volumeIndex = Integer.MAX_VALUE;
      }
      blocksVolumeIndexes.add(volumeIndex);
    }
    return new HdfsBlocksMetadata(blocks.toArray(new ExtendedBlock[] {}),
        blocksVolumeIds, blocksVolumeIndexes);
  }
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.