Package org.apache.hadoop.util

Examples of org.apache.hadoop.util.NativeCrc32


  // Copies everything from in to out while accumulating a native CRC32
  // over the copied bytes. The enclosing method name is not visible in
  // this excerpt; the signature head here is a labeled reconstruction.
  static long copyBytesWithChecksum(InputStream in, OutputStream out,
      int buffSize, boolean close, IOThrottler throttler)
      throws IOException {

    PrintStream ps = out instanceof PrintStream ? (PrintStream) out : null;
    byte[] buf = new byte[buffSize];
    Checksum sum = new NativeCrc32();
    sum.reset();
    try {
      if (throttler != null) {
        throttler.throttle((long) buffSize);
      }
      int bytesRead = in.read(buf);
      while (bytesRead >= 0) {
        sum.update(buf, 0, bytesRead);
        out.write(buf, 0, bytesRead);
        if ((ps != null) && ps.checkError()) {
          throw new IOException("Unable to write to output stream.");
        }
        if (throttler != null) {
          throttler.throttle((long) buffSize);
        }
        bytesRead = in.read(buf);
      }
    } finally {
      if (close) {
        out.close();
        in.close();
      }
    }
    return sum.getValue();
  }
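The loop above pairs a streaming copy with checksum accumulation. Below is a minimal stand-alone sketch of the same pattern, using java.util.zip.CRC32 (which implements the same java.util.zip.Checksum interface that NativeCrc32 implements here) so it runs without Hadoop's native libraries; the class and method names are illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class ChecksummedCopy {
  // Copy all bytes from in to out, returning the CRC32 of the data.
  public static long copyAndChecksum(InputStream in, OutputStream out,
      int buffSize) throws IOException {
    byte[] buf = new byte[buffSize];
    Checksum sum = new CRC32(); // stand-in for NativeCrc32
    int bytesRead;
    while ((bytesRead = in.read(buf)) >= 0) {
      sum.update(buf, 0, bytesRead);
      out.write(buf, 0, bytesRead);
    }
    return sum.getValue();
  }
}

The caller can compare the returned value against a checksum recorded elsewhere to detect corruption introduced during the copy.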


        metaIn.close();
      }
    }

    // compute crc of partial chunk from data read in the block file.
    Checksum partialCrc = new NativeCrc32();
    partialCrc.update(buf, 0, sizePartialChunk);
    LOG.info("Read in partial CRC chunk from disk for block " + block);

    // paranoia! verify that the pre-computed crc matches what we
    // recalculated just now
    if (partialCrc.getValue() != FSInputChecker.checksum2long(crcbuf)) {
      String msg = "Partial CRC " + partialCrc.getValue()
          + " does not match the value computed the last time"
          + " the file was closed: "
          + FSInputChecker.checksum2long(crcbuf);
      throw new IOException(msg);
    }
    // LOG.debug("Partial CRC matches 0x" +
    // Long.toHexString(partialCrc.getValue()));
   
    partialCrcInt = (int) partialCrc.getValue();
  }
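The excerpt recomputes a CRC over a partial chunk and insists it match the value stored when the file was last closed. A sketch of just that verify step, with java.util.zip.CRC32 standing in for NativeCrc32 and a plain long parameter standing in for FSInputChecker.checksum2long(crcbuf):

import java.io.IOException;
import java.util.zip.CRC32;

public class PartialCrcCheck {
  // Recompute the CRC of the partial chunk and compare it with the
  // value persisted when the file was last closed.
  static void verifyPartialCrc(byte[] buf, int sizePartialChunk,
      long storedCrc) throws IOException {
    CRC32 partialCrc = new CRC32(); // stand-in for NativeCrc32
    partialCrc.update(buf, 0, sizePartialChunk);
    if (partialCrc.getValue() != storedCrc) {
      throw new IOException("Partial CRC " + partialCrc.getValue()
          + " does not match stored value " + storedCrc);
    }
  }
}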

  private DFSOutputStream(DFSClient dfsClient, String src, long blockSize,
      Progressable progress, int bytesPerChecksum, short replication,
      boolean forceSync, boolean doParallelWrites, DatanodeInfo[] favoredNodes,
      WriteOptions options) throws IOException {
    super(new NativeCrc32(), bytesPerChecksum, 4, getProfile(dfsClient));
    this.dfsClient = dfsClient;
    this.forceSync = forceSync;
    this.doParallelWrites = doParallelWrites;
    this.src = src;
    this.blockSize = blockSize;
    this.blockReplication = replication;
    this.progress = progress;
    this.options = options;
    this.pktIncludeVersion = dfsClient.ifPacketIncludeVersion();
    this.packetVersion = dfsClient.getOutPacketVersion();
   
    streamer = new DataStreamer();
   
    packetTimeout =
        dfsClient.conf.getLong("dfs.client.packet.timeout", 15000); // 15 seconds
    // try block recovery 5 times:
    maxRecoveryErrorCount =
        dfsClient.conf.getInt("dfs.client.block.recovery.retries", 5);
   
    if (progress != null) {
      DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "+src);
    }

    this.favoredNodes = favoredNodes;

    if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
      throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum +
                            ") and blockSize(" + blockSize +
                            ") do not match. blockSize should be a " +
                            "multiple of io.bytes.per.checksum");
    }
    checksum = DataChecksum.newDataChecksum(FSConstants.CHECKSUM_TYPE,
                                            bytesPerChecksum,
                                            new NativeCrc32());
  }
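Two things in this constructor are worth pulling out: the client tunables it reads and the granularity invariant it enforces. A small sketch using the same configuration keys and defaults as the snippet; the sample block and chunk sizes are illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public class OutputStreamTuning {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Same keys and defaults the constructor reads above.
    long packetTimeout = conf.getLong("dfs.client.packet.timeout", 15000);
    int maxRecoveryRetries = conf.getInt("dfs.client.block.recovery.retries", 5);

    // The constructor's invariant: checksum chunks must tile a block
    // exactly, e.g. 64 MB blocks with 512-byte chunks.
    long blockSize = 64L * 1024 * 1024;
    int bytesPerChecksum = 512;
    if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
      throw new IOException("blockSize must be a multiple of io.bytes.per.checksum");
    }
    System.out.println("packetTimeout=" + packetTimeout
        + "ms, recoveryRetries=" + maxRecoveryRetries);
  }
}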

        File metafile = new File(pathinfo.getMetaPath());
        FileInputStream checksumIn = new FileInputStream(metafile);
        checksumInChannel = checksumIn.getChannel();
        // read and handle the common header here. For now just a version
        BlockMetadataHeader header = BlockMetadataHeader.readHeader(
            new DataInputStream(checksumIn), new NativeCrc32());
        short version = header.getVersion();

        if (version != FSDataset.FORMAT_VERSION_NON_INLINECHECKSUM) {
          LOG.warn("Wrong version (" + version + ") for metadata file for "
              + blk + " ignoring ...");
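BlockMetadataHeader.readHeader consumes the fixed preamble of a block's metadata file before the checksum data begins. A hand-rolled sketch of those reads, assuming the classic non-inline-checksum layout (a 2-byte version, then a 1-byte checksum type and a 4-byte bytes-per-checksum); the method name and path handling are illustrative:

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;

public class MetaHeaderDump {
  // Print the leading fields of a block metadata file.
  static void dumpMetaHeader(String metaPath) throws IOException {
    try (DataInputStream in =
        new DataInputStream(new FileInputStream(metaPath))) {
      short version = in.readShort();      // format version
      byte checksumType = in.readByte();   // e.g. CRC32
      int bytesPerChecksum = in.readInt(); // bytes covered per checksum
      System.out.println("version=" + version + " type=" + checksumType
          + " bytesPerChecksum=" + bytesPerChecksum);
    }
  }
}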

    bytesPerChecksum = checksum.getBytesPerChecksum();
    if (bytesPerChecksum > 10*1024*1024 && bytesPerChecksum > blockLength) {
      checksum = DataChecksum.newDataChecksum(checksum.getChecksumType(),
          Math.max((int)blockLength, 10*1024*1024),
          new NativeCrc32());
      bytesPerChecksum = checksum.getBytesPerChecksum();
    }

    checksumSize = checksum.getChecksumSize();
  }
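The guard above caps a pathologically large chunk size: if the advertised bytes-per-checksum exceeds both 10 MB and the block length, the checksum is rebuilt with max(blockLength, 10 MB) so the reader never allocates an oversized chunk buffer for a small block. The rule in isolation (class and method names illustrative):

public class ChunkSizeCap {
  // Mirrors the capping rule in the snippet above.
  static int capBytesPerChecksum(int bytesPerChecksum, long blockLength) {
    final int tenMb = 10 * 1024 * 1024;
    if (bytesPerChecksum > tenMb && bytesPerChecksum > blockLength) {
      return Math.max((int) blockLength, tenMb);
    }
    return bytesPerChecksum;
  }
}

For example, an advertised 512 MB chunk size against a 1 MB block comes back capped to 10 MB.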

                            ", remote=" + sock.getRemoteSocketAddress() +
                            " for file " + file +
                            " for block " + blockId);
    }

    DataChecksum checksum = DataChecksum.newDataChecksum(in, new NativeCrc32());
    // Warning when we get CHECKSUM_NULL?

    // Read the first chunk offset.
    long firstChunkOffset = in.readLong();
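Here the reader parses the checksum descriptor from the datanode's response stream, then the offset of the first chunk. A sketch of the same reads on a raw DataInputStream, assuming the wire layout implied by the snippet (a 1-byte checksum type and 4-byte bytes-per-checksum, followed by an 8-byte offset); the class and field names are illustrative:

import java.io.DataInputStream;
import java.io.IOException;

public class ReadStreamHeader {
  // Consume the checksum descriptor, then the first chunk offset.
  static long readFirstChunkOffset(DataInputStream in) throws IOException {
    byte checksumType = in.readByte();   // e.g. CHECKSUM_CRC32 or CHECKSUM_NULL
    int bytesPerChecksum = in.readInt(); // bytes covered per checksum
    long firstChunkOffset = in.readLong();
    System.out.println("type=" + checksumType
        + " bytesPerChecksum=" + bytesPerChecksum
        + " firstChunkOffset=" + firstChunkOffset);
    return firstChunkOffset;
  }
}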
