Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.BlockPathInfo
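
BlockPathInfo is the response type of the ClientDatanodeProtocol.getBlockPathInfo RPC: it pairs a block with the local file-system paths of that block's data file and its checksum (meta) file on a datanode, so tests and local readers can open those files directly. A minimal sketch of the lookup (the cluster setup, the file name "/example", and the DFSTestUtil.getFirstBlock helper are assumptions for illustration, not taken from the snippets below):

      // Sketch: resolve a block's local data and meta paths on a datanode.
      // "cluster" and "dfs" are an assumed MiniDFSCluster and its file system.
      Block block = DFSTestUtil.getFirstBlock(dfs, new Path("/example"));
      DataNode datanode = cluster.getDataNodes().get(0);
      BlockPathInfo info = datanode.getBlockPathInfo(block);
      System.out.println("block file: " + info.getBlockPath());
      System.out.println("meta file:  " + info.getMetaPath());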


      // verify lease recovery when the checksum file has length 0
      LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol
          .getLastLocatedBlock(dfs.dfs.namenode, filestr);
      Block lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      BlockPathInfo blockPathInfo = datanode.getBlockPathInfo(lastblock);
      String blockPath = blockPathInfo.getBlockPath();
      String metaPath = blockPathInfo.getMetaPath();

      File f = new File(blockPath);
      File meta = new File(metaPath);
      assertEquals(0, f.length());
      // truncate the checksum file to length 0
      meta.delete();
      DataOutputStream outs = new DataOutputStream(new FileOutputStream(
          metaPath, false));
      outs.close();

      // issue lease recovery and make sure it succeeds.
      int numTries = 500;
      for (int idxTry = 0; idxTry < numTries; idxTry++) {
        boolean success = dfs.recoverLease(filepath);
        if (success) {
          break;
        } else if (idxTry == numTries - 1) {
          TestCase.fail("Recovery lease failed");
        } else {
          Thread.sleep(10);
        }
      }

      // make sure the meta file is still empty
      locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
          dfs.dfs.namenode, filestr);
      Block newBlock = locatedblock.getBlock();
      blockPathInfo = datanode.getBlockPathInfo(newBlock);
      assertEquals(0, blockPathInfo.getNumBytes());
      metaPath = blockPathInfo.getMetaPath();
      meta = new File(metaPath);
      assertEquals(0, meta.length());

      // make sure the file can be opened and read.
      InputStream in = dfs.open(new Path(filestr), 8);
      // ... (snippet truncated)
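
The snippet breaks off at the dfs.open call; a hypothetical continuation (assumed, not the original source) would simply read the stream to prove the zero-length file is readable:

      // Hypothetical continuation: the recovered block holds zero bytes,
      // so the very first read should return EOF
      TestCase.assertEquals(-1, in.read());
      in.close();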


      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      String filestr = "/testMissingChecksumFile";
      DFSTestUtil.creatFileAndWriteSomething(dfs, filestr, (short)2);

      BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr,
          cluster, dfs.dfs);
      String metaPath = blockPathInfo.getMetaPath();

      // Delete the checksum file
      File meta = new File(metaPath);
      meta.delete();
      // ... (snippet truncated)
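
The test is cut off right after the checksum file is deleted; a plausible follow-up (assumed, not shown in the source) is to read the file back so the datanode's handling of a missing meta file is exercised:

      // Hypothetical follow-up: read the file back end to end
      InputStream in = dfs.open(new Path(filestr));
      while (in.read() >= 0) { /* drain; an IOException here fails the test */ }
      in.close();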

      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      String filestr = "/testCorruptChecksumFile";
      DFSTestUtil.creatFileAndWriteSomething(dfs, filestr, (short)2);

      BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr,
          cluster, dfs.dfs);
      String metaPath = blockPathInfo.getMetaPath();

      // Corrupt the checksum file by rewriting it from scratch
      File meta = new File(metaPath);
      meta.delete();
      OutputStream metaOut = new FileOutputStream(metaPath);
      // ... (snippet truncated)
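
The snippet stops after reopening the meta file for writing; a hypothetical continuation (not the original source) corrupts it by writing junk bytes where real checksum data would go:

      // Hypothetical continuation: fill the checksum file with junk bytes
      metaOut.write("not a real checksum".getBytes());
      metaOut.close();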

      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      String filestr = "/testDisableReadChecksum";
      DFSTestUtil.creatFileAndWriteSomething(dfs, filestr, (short)2);

      BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr,
          cluster, dfs.dfs);
      String metaPath = blockPathInfo.getMetaPath();

      // Corrupt the checksum file by rewriting it from scratch
      File meta = new File(metaPath);
      meta.delete();
      OutputStream metaOut = new FileOutputStream(metaPath);
      // ... (snippet truncated)

      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      String filestr = "/testCorruptInlineChecksumFile";
      DFSTestUtil.creatFileAndWriteSomething(dfs, filestr, (short)2);

      BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr, cluster, dfs.dfs);
      String blockPath = blockPathInfo.getBlockPath();

      // Corrupt the inline checksum data inside the block file
      RandomAccessFile blockOut = new RandomAccessFile(blockPath, "rw");
      try {
        blockOut.seek(DataChecksum.getChecksumHeaderSize());
        // ... (snippet truncated)
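
Here the corruption step is truncated right after the seek; a hedged sketch of how it might proceed (assumed continuation, not the original source): overwrite the bytes just past the inline-checksum header so the stored checksums no longer match the data, then close the file:

        // Hypothetical continuation of the try block opened above
        blockOut.write(new byte[] { 0x55, 0x55, 0x55, 0x55 });
      } finally {
        blockOut.close();
      }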

      DistributedFileSystem dfs = (DistributedFileSystem) cluster
          .getFileSystem();
      String filestr = "/testDisableReadInlineChecksum";
      DFSTestUtil.creatFileAndWriteSomething(dfs, filestr, (short)2);

      BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr, cluster, dfs.dfs);
      String blockPath = blockPathInfo.getBlockPath();

      // Corrupt the inline checksum data inside the block file
      RandomAccessFile blockOut = new RandomAccessFile(blockPath, "rw");
      try {
        blockOut.seek(DataChecksum.getChecksumHeaderSize());
        // ... (snippet truncated)

                            block.generationStamp);

      // make RPC to datanode to find local pathnames of blocks
      try {
        ClientDatanodeProtocol remote = getOrCreate(datanode.name);
        BlockPathInfo pathinfo = remote.getBlockPathInfo(namespaceId.id, blk);
        return new ThdfsBlockPath(pathinfo.getBlockPath(),
                                  pathinfo.getMetaPath());
      } catch (IOException e) {
        String msg = "Error getBlockPathInfo datanode " + datanode.name +
                     " namespaceid " + namespaceId.id +
                     " block " + blk;
        LOG.warn(msg);
        // ... (snippet truncated)
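
The catch block is cut off after logging; a hypothetical ending (assumed, not the original source) would rethrow so callers see the failure:

        // Hypothetical continuation of the catch block above
        throw new IOException(msg, e);
      }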

    String metafilePath = "";
    if (!replica.isInlineChecksum()) {
      metafilePath = BlockWithChecksumFileWriter.getMetaFile(
          replica.getDataFileToRead(), block).getAbsolutePath();
    }
    BlockPathInfo info = new BlockPathInfo(block, replica.getDataFileToRead()
        .getAbsolutePath(), metafilePath);
    if (LOG.isDebugEnabled()) {
      LOG.debug("getBlockPathInfo successful block=" + block +
                " blockfile " + replica.getDataFileToRead().getAbsolutePath() +
                " metafile " + metafilePath);
      // ... (snippet truncated)
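
Note that for inline-checksum replicas the meta path is deliberately left as an empty string; callers such as BlockReaderLocal (below) detect that layout from the block file's name via Block.isInlineChecksumBlockFilename rather than from the meta path.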

    String filestr = "/testTransferZeroChecksumFile";
    DistributedFileSystem dfs = (DistributedFileSystem) fileSystem;

    DFSTestUtil.createFile(dfs, new Path(filestr), 9L, (short)1, 0L);

    BlockPathInfo blockPathInfo = DFSTestUtil.getBlockPathInfo(filestr,
        cluster, dfs.getClient());
   
    // Truncate the checksum file to zero length
    RandomAccessFile meta = new RandomAccessFile(blockPathInfo.getMetaPath(), "rw");
    meta.setLength(0);
    meta.close();

    // Truncate the block file as well
    RandomAccessFile block = new RandomAccessFile(blockPathInfo.getBlockPath(), "rw");
    block.setLength(0);
    block.close();
   
    int ns = cluster.getNameNode().getNamespaceID();
    DataNode dnWithBlk = null, dnWithoutBlk = null;
    for (DataNode dn : cluster.getDataNodes()) {
      FSDataset fds = (FSDataset) dn.data;
      DatanodeBlockInfo dbi = fds.getDatanodeBlockInfo(ns, blockPathInfo);
      if (dbi != null) {
        dbi.syncInMemorySize();
        dnWithBlk = dn;
      } else {
        dnWithoutBlk = dn;
      }
    }
    if (dnWithoutBlk == null || dnWithBlk == null) {
      TestCase.fail();
    }
    DatanodeInfo[] list = new DatanodeInfo[1];
    for (DatanodeInfo di : dfs.getClient().datanodeReport(DatanodeReportType.LIVE)) {
      if (dnWithoutBlk.getPort() == di.getPort()) {
        list[0] = di;
        break;
      }
    }
    blockPathInfo.setNumBytes(0);
    dnWithBlk.transferBlocks(ns, new Block[] {blockPathInfo}, new DatanodeInfo[][] {list});
   
    long size = -1;
    for (int i = 0; i < 3; i++) {
      try {
        // ... (snippet truncated)
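
The retry loop is truncated inside its try block; a hedged sketch of its likely shape (the original's exact check is not shown; this reuses the getDatanodeBlockInfo probe from above) waits for the zero-length replica to appear on the receiving datanode:

    // Hypothetical sketch: poll until the transferred replica shows up
    FSDataset targetFds = (FSDataset) dnWithoutBlk.data;
    DatanodeBlockInfo transferred = null;
    for (int i = 0; i < 3 && transferred == null; i++) {
      transferred = targetFds.getDatanodeBlockInfo(ns, blockPathInfo);
      if (transferred == null) {
        Thread.sleep(1000);
      }
    }
    TestCase.assertNotNull("block was not transferred", transferred);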

      boolean clearOsBuffer,
      boolean positionalReadMode) throws IOException {

    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node);

    BlockPathInfo pathinfo = localDatanodeInfo.getOrComputePathInfo(namespaceid,
        blk, node, conf);
   
    // A cleaner alternative would be for the datanode to indicate through
    // BlockPathInfo whether the block file uses inline checksums, and to
    // pass the checksum metadata along with it. However, that raises
    // protocol compatibility concerns, so we avoid it for now; we can
    // always switch to that approach later.
    //
    boolean isInlineChecksum = Block.isInlineChecksumBlockFilename(new Path(
        pathinfo.getBlockPath()).getName());

    // Check that the file exists. It may happen that the HDFS file has been
    // deleted and this block lookup is occurring on behalf of a new HDFS
    // file. In that case the block file could reside in a different portion
    // of the fs.data.dir directory, so we remove this entry from the cache;
    // the next call to this method will repopulate it.
    try {

      // open the block file on the local file system
      FileChannel dataFileChannel;
      FileDescriptor dataFileDescriptor;
      File blkfile = new File(pathinfo.getBlockPath());
      FileInputStream fis = new FileInputStream(blkfile);
      dataFileChannel = fis.getChannel();
      dataFileDescriptor = fis.getFD();
     
      if (LOG.isDebugEnabled()) {
        LOG.debug("New BlockReaderLocal for file " +
            pathinfo.getBlockPath() + " of size " + blkfile.length() +
                  " startOffset " + startOffset +
                  " length " + length);
      }
     
      DataChecksum checksum = null;
      if (isInlineChecksum) {
        GenStampAndChecksum gac = BlockInlineChecksumReader
            .getGenStampAndChecksumFromInlineChecksumFile(new Path(pathinfo
                .getBlockPath()).getName());
        checksum = DataChecksum.newDataChecksum(gac.getChecksumType(),
            gac.getBytesPerChecksum());
       
        if (verifyChecksum) {

          return new BlockReaderLocalInlineChecksum(conf, file, blk,
              startOffset, length, pathinfo, metrics, checksum, verifyChecksum,
              dataFileChannel, dataFileDescriptor, clearOsBuffer,
              positionalReadMode);
        }
        else {
          return new BlockReaderLocalInlineChecksum(conf, file, blk,
              startOffset, length, pathinfo, metrics, checksum,
              dataFileChannel, dataFileDescriptor, clearOsBuffer,
              positionalReadMode);
        }
      } else if (verifyChecksum) {
        FileChannel checksumInChannel = null;
        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        FileInputStream checksumIn = new FileInputStream(metafile);
        checksumInChannel = checksumIn.getChannel();
        // read and handle the common header; for now only the version field
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(
            new DataInputStream(checksumIn), new NativeCrc32());
        short version = header.getVersion();

        if (version != FSDataset.FORMAT_VERSION_NON_INLINECHECKSUM) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        checksum = header.getChecksum();

        return new BlockReaderLocalWithChecksum(conf, file, blk, startOffset,
            length, pathinfo, metrics, checksum, verifyChecksum,
            dataFileChannel, dataFileDescriptor, checksumInChannel,
            clearOsBuffer, positionalReadMode);
      }
      else {
        return new BlockReaderLocalWithChecksum(conf, file, blk, startOffset,
            length, pathinfo, metrics, dataFileChannel, dataFileDescriptor,
            clearOsBuffer, positionalReadMode);
      }

    } catch (FileNotFoundException e) {
      localDatanodeInfo.removeBlockLocalPathInfo(namespaceid, blk);
      DFSClient.LOG.warn("BlockReaderLoca: Removing " + blk +
          " from cache because local file " +
          pathinfo.getBlockPath() +
          " could not be opened.");
      throw e;
    }
  }
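The FileNotFoundException handler at the end is what makes the local-path cache self-healing: a stale BlockPathInfo entry is evicted on the first failed open, and the next call recomputes it through getOrComputePathInfo.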
