Package: org.apache.hadoop.hdfs.server.datanode

Usage examples of org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader


        // Open the block's metadata (checksum) file, validate its header
        // version, and build a BlockReaderLocal starting at the checksum-chunk
        // boundary that contains startOffset.
        // NOTE(review): truncated excerpt — the enclosing method and any
        // cleanup of checksumIn on failure are not visible here.

        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        checksumIn = new FileInputStream(metafile);

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader
            .readHeader(new DataInputStream(checksumIn));
        short version = header.getVersion();
        // A version mismatch is only logged here, not treated as fatal.
        if (version != BlockMetadataHeader.VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        // Round startOffset down to the nearest checksum-chunk boundary so
        // verification can start on a whole chunk.
        long firstChunkOffset = startOffset
            - (startOffset % checksum.getBytesPerChecksum());
        localBlockReader = new BlockReaderLocal(conf, file, blk, token,
            startOffset, length, pathinfo, checksum, true, dataIn,
            firstChunkOffset, checksumIn);
View Full Code Here


      // Open the metadata file buffered, validate the header version, and
      // compute how many checksum chunks are actually usable.
      // NOTE(review): truncated excerpt — blockFileLen, metaFileLen and
      // crcHeaderLen are defined outside the visible lines.
      checksumIn = new DataInputStream(
          new BufferedInputStream(new FileInputStream(metaFile),
              HdfsConstants.IO_FILE_BUFFER_SIZE));

      // read and handle the common header here. For now just a version
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
      short version = header.getVersion();
      // A version mismatch is only logged here, not treated as fatal.
      if (version != BlockMetadataHeader.VERSION) {
        FsDatasetImpl.LOG.warn("Wrong version (" + version + ") for metadata file "
            + metaFile + " ignoring ...");
      }
      DataChecksum checksum = header.getChecksum();
      int bytesPerChecksum = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
      // numChunks is the lesser of:
      //  - chunks implied by the block data length (ceiling division), and
      //  - checksum entries actually present in the meta file after its header,
      // so a short meta file or short block file cannot cause over-reading.
      long numChunks = Math.min(
          (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum,
          (metaFileLen - crcHeaderLen)/checksumSize);
View Full Code Here

  /**
   * Verifies the checksums of a block's data against its metadata file.
   *
   * NOTE(review): truncated excerpt — the chunk-reading/verification loop
   * that follows the buffer allocation is not visible here.
   *
   * @param length        number of bytes of block data to verify
   *                      (presumably from offset 0 — confirm against caller)
   * @param metaIn        stream over the block's metadata file; its header is
   *                      read to obtain the DataChecksum
   * @param blockChannel  channel over the block's data file
   * @param blockFileName name of the block file, for error reporting
   * @throws IOException       if the meta stream has no FileChannel or on
   *                           read failure
   * @throws ChecksumException if verification fails (in the elided loop)
   */
  private static void verifyChecksum(long length,
      FileInputStream metaIn, FileChannel blockChannel, String blockFileName)
          throws IOException, ChecksumException {
    // Verify the checksum from the block's meta file
    // Get the DataChecksum from the meta file header
    BlockMetadataHeader header =
        BlockMetadataHeader.readHeader(new DataInputStream(
            new BufferedInputStream(metaIn, BlockMetadataHeader
                .getHeaderSize())));
    FileChannel metaChannel = metaIn.getChannel();
    if (metaChannel == null) {
      throw new IOException("Block InputStream meta file has no FileChannel.");
    }
    DataChecksum checksum = header.getChecksum();
    final int bytesPerChecksum = checksum.getBytesPerChecksum();
    final int checksumSize = checksum.getChecksumSize();
    // Process up to 8 MiB of block data per pass; size the buffers to hold
    // that many whole chunks and their checksums.
    final int numChunks = (8*1024*1024) / bytesPerChecksum;
    ByteBuffer blockBuf = ByteBuffer.allocate(numChunks*bytesPerChecksum);
    ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks*checksumSize);
 
View Full Code Here

      // Open the metadata file buffered, validate the header version, and
      // bound the number of checksum chunks by both file lengths.
      // NOTE(review): truncated excerpt — blockFileLen, metaFileLen and
      // crcHeaderLen are defined outside the visible lines.
      checksumIn = new DataInputStream(
          new BufferedInputStream(new FileInputStream(metaFile),
              HdfsConstants.IO_FILE_BUFFER_SIZE));

      // read and handle the common header here. For now just a version
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
      short version = header.getVersion();
      // A version mismatch is only logged here, not treated as fatal.
      if (version != BlockMetadataHeader.VERSION) {
        FsDatasetImpl.LOG.warn("Wrong version (" + version + ") for metadata file "
            + metaFile + " ignoring ...");
      }
      DataChecksum checksum = header.getChecksum();
      int bytesPerChecksum = checksum.getBytesPerChecksum();
      int checksumSize = checksum.getChecksumSize();
      // Min of chunks implied by the block length (ceiling division) and
      // checksum records present after the meta file's header.
      long numChunks = Math.min(
          (blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum,
          (metaFileLen - crcHeaderLen)/checksumSize);
View Full Code Here

    // NOTE(review): truncated excerpt from a BlockReaderLocal constructor —
    // the signature and the tail of the visible if-branch are not shown.
    this.block = block;
    this.fisCache = fisCache;

    // read and handle the common header here. For now just a version
    // Rewind the checksum stream so the header is read from the start of
    // the metadata file regardless of the stream's prior position.
    checksumIn.getChannel().position(0);
    BlockMetadataHeader header = BlockMetadataHeader
        .readHeader(new DataInputStream(
            new BufferedInputStream(checksumIn,
                BlockMetadataHeader.getHeaderSize())));
    short version = header.getVersion();
    // Unlike the warn-and-continue variants, a version mismatch is fatal here.
    if (version != BlockMetadataHeader.VERSION) {
      throw new IOException("Wrong version (" + version + ") of the " +
          "metadata file for " + filename + ".");
    }
    // Checksum verification happens only if the caller asked for it AND the
    // skip-checksum client config key does not disable it.
    if (!verifyChecksum) {
      this.verifyChecksum = false;
    } else {
      this.verifyChecksum = !conf.getBoolean(DFSConfigKeys.
          DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
        DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
    }
    long firstChunkOffset;
    if (this.verifyChecksum) {
      this.checksum = header.getChecksum();
      this.bytesPerChecksum = this.checksum.getBytesPerChecksum();
      this.checksumSize = this.checksum.getChecksumSize();
      // Round down to the chunk boundary containing startOffset; the
      // remainder is how far into that chunk reading actually begins.
      firstChunkOffset = startOffset
          - (startOffset % checksum.getBytesPerChecksum());
      this.offsetFromChunkBoundary = (int) (startOffset - firstChunkOffset);
View Full Code Here

        // Open the block's metadata file, validate the header version, and
        // build a BlockReaderLocalLegacy anchored at the chunk boundary
        // containing startOffset.
        // NOTE(review): truncated excerpt — the enclosing method and any
        // cleanup of checksumIn on failure are not visible here.

        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        checksumIn = new FileInputStream(metafile);

        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader
            .readHeader(new DataInputStream(checksumIn));
        short version = header.getVersion();
        // A version mismatch is only logged here, not treated as fatal.
        if (version != BlockMetadataHeader.VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();
        // Round startOffset down to the nearest checksum-chunk boundary.
        long firstChunkOffset = startOffset
            - (startOffset % checksum.getBytesPerChecksum());
        localBlockReader = new BlockReaderLocalLegacy(conf, file, blk, token,
            startOffset, length, pathinfo, checksum, true, dataIn,
            firstChunkOffset, checksumIn);
View Full Code Here

        // Variant that reads the header with an explicit PureJavaCrc32
        // checksum implementation and compares against
        // FSDataset.METADATA_VERSION.
        // NOTE(review): truncated excerpt — the enclosing method's header and
        // surrounding braces are not visible here.

        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        FileInputStream checksumIn = new FileInputStream(metafile);
   
        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(new DataInputStream(checksumIn), new PureJavaCrc32());
        short version = header.getVersion();
     
        // A version mismatch is only logged here, not treated as fatal.
        if (version != FSDataset.METADATA_VERSION) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        DataChecksum checksum = header.getChecksum();

        return new BlockReaderLocal(conf, file, blk, startOffset, length,
            pathinfo, metrics, checksum, verifyChecksum, dataIn, checksumIn,
            clearOsBuffer);
      }
View Full Code Here

        // Variant that reads the header with a NativeCrc32 implementation,
        // keeps the meta file's FileChannel, and compares against the
        // non-inline-checksum format version.
        // NOTE(review): truncated excerpt — the enclosing method and the
        // declarations of checksumInChannel/checksum are not visible here.

        // get the metadata file
        File metafile = new File(pathinfo.getMetaPath());
        FileInputStream checksumIn = new FileInputStream(metafile);
        checksumInChannel = checksumIn.getChannel();
        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(
            new DataInputStream(checksumIn), new NativeCrc32());
        short version = header.getVersion();

        // A version mismatch is only logged here, not treated as fatal.
        if (version != FSDataset.FORMAT_VERSION_NON_INLINECHECKSUM) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
        }
        checksum = header.getChecksum();

        return new BlockReaderLocalWithChecksum(conf, file, blk, startOffset,
            length, pathinfo, metrics, checksum, verifyChecksum,
            dataFileChannel, dataFileDescriptor, checksumInChannel,
            clearOsBuffer, positionalReadMode);
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.