package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.BlockSender


    try {
      LOG.info("Sending block " + block +
          " from " + sock.getLocalSocketAddress().toString() +
          " to " + sock.getRemoteSocketAddress().toString());
      BlockSender blockSender =
        new BlockSender(namespaceId, block, blockSize, 0, blockSize,
            corruptChecksumOk, chunkOffsetOK, verifyChecksum,
            transferToAllowed,
            metadataIn, new BlockSender.InputStreamFactory() {
          @Override
          public InputStream createStream(long offset)
          throws IOException {
            // we are passing 0 as the offset above,
            // so we can safely ignore
            // the offset passed
            return blockContents;
          }
        });

      // Header info
      out.writeShort(dataTransferVersion);
      out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
      if (dataTransferVersion >= DataTransferProtocol.FEDERATION_VERSION) {
        out.writeInt(namespaceId);
      }
      out.writeLong(block.getBlockId());
      out.writeLong(block.getGenerationStamp());
      out.writeInt(0);           // no pipelining
      out.writeBoolean(false);   // not part of recovery
      Text.writeString(out, ""); // client
      out.writeBoolean(true); // sending src node information
      DatanodeInfo srcNode = new DatanodeInfo();
      srcNode.write(out); // Write src node DatanodeInfo
      // write targets
      out.writeInt(0); // num targets
      // send data & checksum
      blockSender.sendBlock(out, baseStream, null, progress);

      LOG.info("Sent block " + block + " to " + datanode);
    } finally {
      sock.close();
      out.close();
View Full Code Here


    try {
      LOG.info("Sending block " + block +
          " from " + sock.getLocalSocketAddress().toString() +
          " to " + sock.getRemoteSocketAddress().toString());
      BlockSender blockSender =
        new BlockSender(namespaceId, block, blockSize, 0, blockSize,
            corruptChecksumOk, chunkOffsetOK, verifyChecksum,
            transferToAllowed, dataTransferVersion >= DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION,
            new BlockWithChecksumFileReader.InputStreamWithChecksumFactory() {
              @Override
              public InputStream createStream(long offset) throws IOException {
                // we are passing 0 as the offset above,
                // so we can safely ignore
                // the offset passed
                return blockContents;
              }

              @Override
              public DataInputStream getChecksumStream() throws IOException {
                return metadataIn;
              }

            @Override
            public BlockDataFile.Reader getBlockDataFileReader()
                throws IOException {
              return BlockDataFile.getDummyDataFileFromFileChannel(
                  blockContents.getChannel()).getReader(null);
            }
          });

      WriteBlockHeader header = new WriteBlockHeader(new VersionAndOpcode(
          dataTransferVersion, DataTransferProtocol.OP_WRITE_BLOCK));
      header.set(namespaceId, block.getBlockId(), block.getGenerationStamp(),
          0, false, true, new DatanodeInfo(), 0, null, "");
      header.writeVersionAndOpCode(out);
      header.write(out);
      blockSender.sendBlock(out, baseStream, null, progress);

      LOG.info("Sent block " + block + " to " + datanode);
    } finally {
      sock.close();
      out.close();
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.server.datanode.BlockSender

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle, Inc. Contact coftware#gmail.com.