Package org.apache.hadoop.hdfs.protocol.DataTransferProtocol

Usage examples of org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader, excerpted from the Hadoop HDFS client, datanode, and protocol test sources.
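As a quick orientation before the excerpts, the minimal sketch below round-trips a header the same way the unit test at the end of this page does. It assumes only what the excerpts themselves show: the constructor argument order (packetLen, offsetInBlock, seqno, lastPacketInBlock, dataLen), write()/readFields() for stream serialization, readFields(ByteBuffer) for buffer parsing, and value-based equals(). The class name PacketHeaderRoundTrip is illustrative only.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;

public class PacketHeaderRoundTrip {
  public static void main(String[] args) throws IOException {
    // Argument order as used throughout the excerpts:
    // packetLen, offsetInBlock, seqno, lastPacketInBlock, dataLen.
    PacketHeader header = new PacketHeader(4, 1024, 100, false, 4096);

    // Serialize the header to a stream.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    header.write(new DataOutputStream(baos));

    // Read it back from a DataInput ...
    PacketHeader fromStream = new PacketHeader();
    fromStream.readFields(new DataInputStream(
        new ByteArrayInputStream(baos.toByteArray())));

    // ... and from a ByteBuffer, as the block reader does.
    PacketHeader fromBuffer = new PacketHeader();
    fromBuffer.readFields(ByteBuffer.wrap(baos.toByteArray()));

    System.out.println(header.equals(fromStream));  // expected: true
    System.out.println(header.equals(fromBuffer));  // expected: true
  }
}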


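    // Framing an outgoing packet on the send path: packetLen covers the data,
    // the checksums, and the 4-byte length field; the header is written into
    // the packet buffer ahead of the checksum region via putInBuffer().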
    int packetLen = len + numChunks*checksumSize + 4;
    boolean lastDataPacket = offset + len == endOffset && len > 0;
    pkt.clear();


    PacketHeader header = new PacketHeader(
      packetLen, offset, seqno, (len == 0), len);
    header.putInBuffer(pkt);

    int checksumOff = pkt.position();
    int checksumLen = numChunks * checksumSize;
    byte[] buf = pkt.array();
   


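      // Finalizing a client-side packet: wrap the packet's backing array in a
      // ByteBuffer, write the PacketHeader at the front with putInBuffer(),
      // and return the buffer positioned at the start of the packet.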
      ByteBuffer buffer = ByteBuffer.wrap(
        buf, dataStart - checksumPos,
        PacketHeader.PKT_HEADER_LEN + pktLen - DFSClient.SIZE_OF_INTEGER);
      buf = null;
      buffer.mark();

      PacketHeader header = new PacketHeader(
        pktLen, offsetInBlock, seqno, lastPacketInBlock, dataLen);
      header.putInBuffer(buffer);
     
      buffer.reset();
      return buffer;
    }


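    // Reading a packet on the client read path: parse the PacketHeader from
    // the stream, sanity-check it against the last sequence number, then read
    // the checksum bytes that precede the packet data.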
    // Read next packet if the previous packet has been read completely.
    if (dataLeft <= 0) {
      //Read packet headers.
      PacketHeader header = new PacketHeader();
      header.readFields(in);

      if (LOG.isDebugEnabled()) {
        LOG.debug("DFSClient readChunk got header " + header);
      }

      // Sanity check the lengths
      if (!header.sanityCheck(lastSeqNo)) {
        throw new IOException("BlockReader: error in packet header " +
                              header);
      }

      lastSeqNo = header.getSeqno();
      dataLeft = header.getDataLen();
      adjustChecksumBytes(header.getDataLen());
      if (header.getDataLen() > 0) {
        IOUtils.readFully(in, checksumBytes.array(), 0,
                          checksumBytes.limit());
      }
    }

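  // Datanode receive path: decode the header from the packet buffer, validate
  // the offset in block and the data length, then delegate to the overload
  // that receives the packet body.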
  private int receivePacket() throws IOException {
    // read the next packet
    readNextPacket();

    buf.mark();
    PacketHeader header = new PacketHeader();
    header.readFields(buf);
    int endOfHeader = buf.position();
    buf.reset();

    // Sanity check the header
    if (header.getOffsetInBlock() > replicaInfo.getNumBytes()) {
      throw new IOException("Received an out-of-sequence packet for " + block +
          " from " + inAddr + " at offset " + header.getOffsetInBlock() +
          ". Expecting packet starting at " + replicaInfo.getNumBytes());
    }
    if (header.getDataLen() < 0) {
      throw new IOException("Got wrong length during writeBlock(" + block +
                            ") from " + inAddr + " at offset " +
                            header.getOffsetInBlock() + ": " +
                            header.getDataLen());
    }

    return receivePacket(
      header.getOffsetInBlock(),
      header.getSeqno(),
      header.isLastPacketInBlock(),
      header.getDataLen(), endOfHeader);
  }

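  // Test helper: hand-craft a zero-length last packet for a block and queue
  // the expected SUCCESS reply in the receive-side buffer.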
  private void writeZeroLengthPacket(Block block, String description)
  throws IOException {
    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
    sendOut.writeInt(512);         // bytes per checksum

    PacketHeader hdr = new PacketHeader(
      8,                   // size of packet
      block.getNumBytes(), // OffsetInBlock
      100,                 // sequence number
      true,                // lastPacketInBlock
      0);                  // chunk length
    hdr.write(sendOut);
    sendOut.writeInt(0);           // zero checksum

    //ok finally write a block with 0 len
    SUCCESS.write(recvOut);
    Text.writeString(recvOut, "");

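    // Protocol test: send a packet header with a negative data length and
    // expect the pipeline to ack with ERROR, then write a valid zero-length
    // packet for a fresh block.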
    DataTransferProtocol.Sender.opWriteBlock(sendOut,
        new Block(++newBlockId), 0,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
        new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
    sendOut.writeInt(512); // bytes per checksum

    PacketHeader hdr = new PacketHeader(
      4,     // size of packet
      0,     // offset in block,
      100,   // seqno
      false, // last packet
      -1 - random.nextInt(oneMil)); // bad datalen
    hdr.write(sendOut);

    SUCCESS.write(recvOut);
    Text.writeString(recvOut, "");
    new PipelineAck(100, new Status[]{ERROR}).write(recvOut);
    sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId,
                 true);

    // test for writing a valid zero size block
    sendBuf.reset();
    recvBuf.reset();
    DataTransferProtocol.Sender.opWriteBlock(sendOut,
        new Block(++newBlockId), 0,
        BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, 0L, 0L, "cl", null,
        new DatanodeInfo[1], BlockTokenSecretManager.DUMMY_TOKEN);
    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
    sendOut.writeInt(512);         // bytes per checksum

    hdr = new PacketHeader(
      8,     // size of packet
      0,     // OffsetInBlock
      100,   // sequence number
      true,  // lastPacketInBlock
      0);    // chunk length
    hdr.write(sendOut);
    sendOut.writeInt(0);           // zero checksum
    sendOut.flush();
    //ok finally write a block with 0 len
    SUCCESS.write(recvOut);
    Text.writeString(recvOut, "");


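  // Unit test: round-trip a PacketHeader through DataOutput/DataInput and
  // through a ByteBuffer, then build a well-formed header for the sanity
  // check that follows.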
  @Test
  public void testPacketHeader() throws IOException {
    PacketHeader hdr = new PacketHeader(
      4,                   // size of packet
      1024,                // OffsetInBlock
      100,                // sequence number
      false,               // lastPacketInBlock
      4096);               // chunk length

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    hdr.write(new DataOutputStream(baos));

    // Read back using DataInput
    PacketHeader readBack = new PacketHeader();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    readBack.readFields(new DataInputStream(bais));
    assertEquals(hdr, readBack);

    // Read back using ByteBuffer
    readBack = new PacketHeader();
    readBack.readFields(ByteBuffer.wrap(baos.toByteArray()));
    assertEquals(hdr, readBack);

    // Test sanity check for good header
    PacketHeader goodHeader = new PacketHeader(
      4,                   // size of packet
      0,                   // OffsetInBlock
      100,                 // sequence number
      true,                // lastPacketInBlock
      0);                  // chunk length
