Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo

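BlockLocalPathInfo pairs an HDFS block with the local-filesystem paths of its data file and its checksum (metadata) file. A datanode returns it over the ClientDatanodeProtocol RPC so that a co-located client can short-circuit the read and open the files directly instead of streaming through the datanode. As a minimal orientation sketch (assuming a proxy, block, and token already set up, as they are in the examples below):

    // Sketch only: 'proxy', 'blk', and 'token' are assumed to exist,
    // as in the examples that follow.
    BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
    File blockFile = new File(info.getBlockPath()); // block data on local disk
    File metaFile = new File(info.getMetaPath());   // its checksum file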

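Client side of the RPC, from the protobuf translator (ClientDatanodeProtocolTranslatorPB): the response proto is unwrapped into a BlockLocalPathInfo, and a ServiceException is mapped back to the underlying remote exception. The excerpt begins after the request proto has been built: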
    try {
      resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req);
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
    return new BlockLocalPathInfo(PBHelper.convert(resp.getBlock()),
        resp.getLocalPath(), resp.getLocalMetaPath());
  }


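The matching server-side translator: it invokes the datanode implementation, wraps any IOException in a ServiceException, and packs the result back into a response proto: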
  @Override
  public GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
      RpcController unused, GetBlockLocalPathInfoRequestProto request)
      throws ServiceException {
    BlockLocalPathInfo resp;
    try {
      resp = impl.getBlockLocalPathInfo(PBHelper.convert(request.getBlock()),
          PBHelper.convert(request.getToken()));
    } catch (IOException e) {
      throw new ServiceException(e);
    }
    return GetBlockLocalPathInfoResponseProto.newBuilder()
        .setBlock(PBHelper.convert(resp.getBlock()))
        .setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath())
        .build();
  }

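BlockReaderLocalLegacy.newBlockReader (the excerpt starts partway through the method signature): the path info is fetched from a per-datanode cache, falling back to an RPC (the helper shown next), and the local block file, plus the metadata file when checksums are verified, is opened directly: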
      Token<BlockTokenIdentifier> token, DatanodeInfo node,
      long startOffset, long length) throws IOException {
    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
        .getIpcPort());
    // check the cache first
    BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
    if (pathinfo == null) {
      if (userGroupInformation == null) {
        userGroupInformation = UserGroupInformation.getCurrentUser();
      }
      pathinfo = getBlockPathInfo(userGroupInformation, blk, node,
          configuration, conf.hdfsTimeout, token,
          conf.connectToDnViaHostname);
    }

    // check to see if the file exists. It may so happen that the
    // HDFS file has been deleted and this block-lookup is occurring
    // on behalf of a new HDFS file. This time, the block file could
    // be residing in a different portion of the fs.data.dir directory.
    // In this case, we remove this entry from the cache. The next
    // call to this method will re-populate the cache.
    FileInputStream dataIn = null;
    FileInputStream checksumIn = null;
    BlockReaderLocalLegacy localBlockReader = null;
    boolean skipChecksumCheck = conf.skipShortCircuitChecksums;
    try {
      // get a local file system
      File blkfile = new File(pathinfo.getBlockPath());
      dataIn = new FileInputStream(blkfile);

      if (LOG.isDebugEnabled()) {
        LOG.debug("New BlockReaderLocalLegacy for file " + blkfile + " of size "
            + blkfile.length() + " startOffset " + startOffset + " length "
            + length + " short circuit checksum " + !skipChecksumCheck);
      }

      if (!skipChecksumCheck) {
        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        checksumIn = new FileInputStream(metafile);

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader
            .readHeader(new DataInputStream(checksumIn));
        short version = header.getVersion();
        if (version != BlockMetadataHeader.VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        long firstChunkOffset = startOffset
            - (startOffset % checksum.getBytesPerChecksum());
        localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
            startOffset, length, pathinfo, checksum, true, dataIn,
            firstChunkOffset, checksumIn);
      } else {
        localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
            startOffset, length, pathinfo, dataIn);
      }
    } catch (IOException e) {
      // remove from cache
      localDatanodeInfo.removeBlockLocalPathInfo(blk);
      DFSClient.LOG.warn("BlockReaderLocalLegacy: Removing " + blk
          + " from cache because local file " + pathinfo.getBlockPath()
          + " could not be opened.");
      throw e;
    } finally {
      if (localBlockReader == null) {
        // Reader construction failed: close whatever was opened above.
        // (The excerpt was truncated here; the cleanup and return are
        // reconstructed from context.)
        if (dataIn != null) {
          dataIn.close();
        }
        if (checksumIn != null) {
          checksumIn.close();
        }
      }
    }
    return localBlockReader;
  }

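The RPC helper used above: a call to the local datanode to resolve the block's local pathnames: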
  private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
      ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
      Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
      throws IOException {
    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
    BlockLocalPathInfo pathinfo = null;
    ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
        conf, timeout, connectToDnViaHostname);
    try {
      // make RPC to local datanode to find local pathnames of blocks
      pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    } catch (IOException e) {
      // (Truncated in the original excerpt; reconstructed from context:
      // drop the cached proxy on failure so a stale connection is not reused.)
      localDatanodeInfo.resetDatanodeProxy();
      throw e;
    }
    return pathinfo;
  }

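An earlier revision of the same factory, from when the class was still named BlockReaderLocal (again starting mid-signature); the logic is unchanged apart from how configuration and timeouts are passed in: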
      int socketTimeout, long startOffset, long length) throws IOException {

    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
        .getIpcPort());
    // check the cache first
    BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
    if (pathinfo == null) {
      pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
    }

    // check to see if the file exists. It may so happen that the
    // HDFS file has been deleted and this block-lookup is occurring
    // on behalf of a new HDFS file. This time, the block file could
    // be residing in a different portion of the fs.data.dir directory.
    // In this case, we remove this entry from the cache. The next
    // call to this method will re-populate the cache.
    FileInputStream dataIn = null;
    FileInputStream checksumIn = null;
    BlockReaderLocal localBlockReader = null;
    boolean skipChecksumCheck = skipChecksumCheck(conf);
    try {
      // get a local file system
      File blkfile = new File(pathinfo.getBlockPath());
      dataIn = new FileInputStream(blkfile);

      if (LOG.isDebugEnabled()) {
        LOG.debug("New BlockReaderLocal for file " + blkfile + " of size "
            + blkfile.length() + " startOffset " + startOffset + " length "
            + length + " short circuit checksum " + skipChecksumCheck);
      }

      if (!skipChecksumCheck) {
        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        checksumIn = new FileInputStream(metafile);

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader
            .readHeader(new DataInputStream(checksumIn));
        short version = header.getVersion();
        if (version != BlockMetadataHeader.VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        long firstChunkOffset = startOffset
            - (startOffset % checksum.getBytesPerChecksum());
        localBlockReader = new BlockReaderLocal(conf, file, blk, token,
            startOffset, length, pathinfo, checksum, true, dataIn,
            firstChunkOffset, checksumIn);
      } else {
        localBlockReader = new BlockReaderLocal(conf, file, blk, token,
            startOffset, length, pathinfo, dataIn);
      }
    } catch (IOException e) {
      // remove from cache
      localDatanodeInfo.removeBlockLocalPathInfo(blk);
      DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk
          + " from cache because local file " + pathinfo.getBlockPath()
          + " could not be opened.");
      throw e;
    } finally {
      if (localBlockReader == null) {
        // Reader construction failed: close whatever was opened above.
        // (The excerpt was truncated here; the cleanup and return are
        // reconstructed from context.)
        if (dataIn != null) {
          dataIn.close();
        }
        if (checksumIn != null) {
          checksumIn.close();
        }
      }
    }
    return localBlockReader;
  }

 
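Its companion RPC helper; note the direct access to the older public ipcPort field: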
  private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
      DatanodeInfo node, Configuration conf, int timeout,
      Token<BlockTokenIdentifier> token) throws IOException {
    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort);
    BlockLocalPathInfo pathinfo = null;
    ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
        conf, timeout);
    try {
      // make RPC to local datanode to find local pathnames of blocks
      pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    } catch (IOException e) {
      // (Truncated in the original excerpt; reconstructed from context:
      // drop the cached proxy on failure so a stale connection is not reused.)
      localDatanodeInfo.resetDatanodeProxy();
      throw e;
    }
    return pathinfo;
  }

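From a unit test of the RPC's access control: an authorized caller retrieves the expected block path, then the same call is attempted as a user who is not allowed: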
      // (Opening reconstructed; the original excerpt begins mid-call. The
      // variable name 'aUgi' and the DFSUtil helper call are assumptions
      // based on the surrounding test code.)
      ClientDatanodeProtocol proxy = aUgi
          .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
            @Override
            public ClientDatanodeProtocol run() throws Exception {
              return DFSUtil.createClientDatanodeProtocolProxy(
                  dnInfo, conf, 60000);
            }
          });

      // This should succeed
      BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
      Assert.assertEquals(dn.data.getBlockLocalPathInfo(blk).getBlockPath(),
          blpi.getBlockPath());
      RPC.stopProxy(proxy);

      // Now try with a not allowed user.
      UserGroupInformation bUgi = UserGroupInformation
          .createRemoteUser("notalloweduser");

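A still earlier variant (starting mid-signature) that uses Block rather than ExtendedBlock and checks FSDataset.METADATA_VERSION: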
    String file, Block blk, Token<BlockTokenIdentifier> token, DatanodeInfo node,
    int socketTimeout, long startOffset, long length) throws IOException {
   
    LocalDatanodeInfo localDatanodeInfo =  getLocalDatanodeInfo(node.getIpcPort());
    // check the cache first
    BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
    if (pathinfo == null) {
      pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
    }

    // check to see if the file exists. It may so happen that the
    // HDFS file has been deleted and this block-lookup is occurring
    // on behalf of a new HDFS file. This time, the block file could
    // be residing in a different portion of the fs.data.dir directory.
    // In this case, we remove this entry from the cache. The next
    // call to this method will re-populate the cache.
    FileInputStream dataIn = null;
    FileInputStream checksumIn = null;
    BlockReaderLocal localBlockReader = null;
    boolean skipChecksum = shortCircuitChecksum(conf);
    try {
      // get a local file system
      File blkfile = new File(pathinfo.getBlockPath());
      dataIn = new FileInputStream(blkfile);

      if (LOG.isDebugEnabled()) {
        LOG.debug("New BlockReaderLocal for file " + blkfile + " of size "
            + blkfile.length() + " startOffset " + startOffset + " length "
            + length + " short circuit checksum " + skipChecksum);
      }

      if (!skipChecksum) {
        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        checksumIn = new FileInputStream(metafile);

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader
            .readHeader(new DataInputStream(checksumIn));
        short version = header.getVersion();
        if (version != FSDataset.METADATA_VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        localBlockReader = new BlockReaderLocal(conf, file, blk, token, startOffset, length,
            pathinfo, checksum, true, dataIn, checksumIn);
      } else {
        localBlockReader = new BlockReaderLocal(conf, file, blk, token, startOffset, length,
            pathinfo, dataIn);
      }
    } catch (IOException e) {
      // remove from cache
      localDatanodeInfo.removeBlockLocalPathInfo(blk);
      DFSClient.LOG.warn("BlockReaderLocal: Removing " + blk +
          " from cache because local file " + pathinfo.getBlockPath() +
          " could not be opened.");
      throw e;
    } finally {
      if (localBlockReader == null) {
        // Reader construction failed: close whatever was opened above.
        // (The excerpt was truncated here; the cleanup and return are
        // reconstructed from context.)
        if (dataIn != null) {
          dataIn.close();
        }
        if (checksumIn != null) {
          checksumIn.close();
        }
      }
    }
    return localBlockReader;
  }
