Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.ReadBlockHeader


    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
   
    ReadBlockHeader header = new ReadBlockHeader(versionAndOpcode);
    header.readFields(in);
   
    int namespaceId = header.getNamespaceId();
    long blockId = header.getBlockId();
    Block block = new Block(blockId, 0, header.getGenStamp());
    long startOffset = header.getStartOffset();
    long length = header.getLen();
    String clientName = header.getClientName();

    // send the block
    OutputStream baseStream = NetUtils.getOutputStream(s,
        datanode.socketWriteTimeout);
    DataOutputStream out = new DataOutputStream(
        new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));


    // in and out will be closed when sock is closed (by the caller)
    DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));

    //write the header.
    ReadBlockHeader readBlockHeader = new ReadBlockHeader(
        dataTransferVersion, namespaceId, blockId, genStamp, startOffset, len,
        clientName);
    readBlockHeader.writeVersionAndOpCode(out);
    readBlockHeader.write(out);
    out.flush();

    //
    // Get bytes in block, set streams
    //
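The snippet above is the client side of the exchange: it serializes an OP_READ_BLOCK request before reading the block data back. Below is a minimal, self-contained sketch of that call sequence, not code from the project: the helper class and method names and the explicit writeTimeout parameter are assumptions, while the ReadBlockHeader constructor and the writeVersionAndOpCode/write/flush sequence are taken directly from the snippets (this ReadBlockHeader API appears to belong to the facebook hadoop-20 line of HDFS rather than Apache trunk).

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.hdfs.protocol.ReadBlockHeader;
import org.apache.hadoop.net.NetUtils;

public class ReadBlockRequestSketch {

  /**
   * Writes an OP_READ_BLOCK request header to the datanode socket and
   * returns the stream the request was written to.
   */
  public static DataOutputStream sendReadRequest(Socket sock,
      int dataTransferVersion, int namespaceId, long blockId, long genStamp,
      long startOffset, long len, String clientName, long writeTimeout)
      throws IOException {
    // Buffer the socket output; the snippets above use
    // HdfsConstants.WRITE_TIMEOUT, here the timeout is a plain parameter.
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        NetUtils.getOutputStream(sock, writeTimeout)));

    // Same call sequence as in the snippets: version and opcode first,
    // then the serialized header fields, then flush.
    ReadBlockHeader header = new ReadBlockHeader(dataTransferVersion,
        namespaceId, blockId, genStamp, startOffset, len, clientName);
    header.writeVersionAndOpCode(out);
    header.write(out);
    out.flush();
    return out;
  }
}

After the flush, the client reads the datanode's status short and the block packets from the socket's input stream, as the datanode-side snippet further down suggests (it answers with OP_STATUS_SUCCESS before streaming the block).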


    //
    // Read in the header
    //
    long startNanoTime = System.nanoTime();
    long startTime = System.currentTimeMillis();
   
    ReadBlockHeader header = new ReadBlockHeader(versionAndOpcode);
    header.readFields(in);
   
    ReadOptions options = header.getReadOptions();
    boolean ioprioEnabled = !options.isIoprioDisabled();
    if (ioprioEnabled) {
      NativeIO.ioprioSetIfPossible(options.getIoprioClass(),
          options.getIoprioData());
    }

    int namespaceId = header.getNamespaceId();
    long blockId = header.getBlockId();
    Block block = new Block(blockId, 0, header.getGenStamp());
    long startOffset = header.getStartOffset();
    long length = header.getLen();
    String clientName = header.getClientName();
    reuseConnection = header.getReuseConnection();
    boolean shouldProfile = header.getShouldProfile();
    FSDataNodeReadProfilingData dnData = shouldProfile ?
        new FSDataNodeReadProfilingData() : null;

    if (shouldProfile) {
      dnData.readVersionAndOpCodeTime = (startNanoTime - receiverStartTime);
      dnData.readBlockHeaderTime = (System.nanoTime() - startNanoTime);
      dnData.startProfiling();
    }
   
    // send the block
    OutputStream baseStream = NetUtils.getOutputStream(s,
        datanode.socketWriteTimeout);
    DataOutputStream out = new DataOutputStream(
                 new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
   
    BlockSender blockSender = null;
    String clientTraceFmt = null;
    if (ClientTraceLog.isInfoEnabled()) {   
      if (remoteAddress == null) {
        getAddresses();
      }
      clientTraceFmt = clientName.length() > 0 ? String.format(
          DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "%d",
          "HDFS_READ", clientName, "%d",
          datanode.getDNRegistrationForNS(namespaceId).getStorageID(), block,
          "%d")
          :
          datanode.getDNRegistrationForNS(namespaceId)
          + " Served block " + block + " to " + s.getInetAddress();
    }
    updateCurrentThreadName("sending block " + block);
    InjectionHandler.processEvent(InjectionEvent.DATANODE_READ_BLOCK);
       
    try {
      try {
        blockSender = new BlockSender(namespaceId, block, startOffset, length,
            datanode.ignoreChecksumWhenRead, true, true, false,
            versionAndOpcode.getDataTransferVersion() >=
              DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION,
            false, datanode, clientTraceFmt);
        if (shouldProfile) {
          blockSender.enableReadProfiling(dnData);
        }
      } catch (IOException e) {
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR,
            datanode.socketWriteTimeout);
        throw e;
      }
      if (ClientTraceLog.isInfoEnabled()) {
        ClientTraceLog.info("Sending blocks. namespaceId: "
            + namespaceId + " block: " + block + " to " + remoteAddress);
      }

      out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS); // send op status
      if (shouldProfile) {
        dnData.startSendBlock();
      }
     
      long read = blockSender.sendBlock(out, baseStream, null); // send data
      if (shouldProfile) {
        dnData.endSendBlock();
      }
     
      // report finalization information and block length.
      ReplicaToRead replicaRead = blockSender.getReplicaToRead();
      if (replicaRead == null) {
        replicaRead = datanode.data.getReplicaToRead(namespaceId, block);
      }
      if (replicaRead == null) {
        throw new IOException("Can't find block " + block + " in volumeMap");
      }
     
      int fadvise = header.getReadOptions().getFadvise();
      if (fadvise != 0) {
        blockSender.fadviseStream(fadvise, startOffset, length);
      }

      boolean isBlockFinalized = replicaRead.isFinalized();
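The long datanode-side snippet above does much more than parse the header (ioprio hints, read profiling, BlockSender setup, client tracing). The sketch below isolates just the header-parsing step into an illustrative value class; BlockReadRequest and parse() are made-up names, the VersionAndOpcode import path is an assumption, and the ReadBlockHeader calls plus the Block construction with a zero length mirror the snippet.

import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ReadBlockHeader;
import org.apache.hadoop.hdfs.protocol.VersionAndOpcode; // assumed import path

class BlockReadRequest {
  final int namespaceId;
  final Block block;
  final long startOffset;
  final long length;
  final String clientName;

  private BlockReadRequest(int namespaceId, Block block, long startOffset,
      long length, String clientName) {
    this.namespaceId = namespaceId;
    this.block = block;
    this.startOffset = startOffset;
    this.length = length;
    this.clientName = clientName;
  }

  /**
   * Parses the rest of an OP_READ_BLOCK request. The version and opcode
   * are assumed to have been consumed already by the receiver, as in the
   * snippets above.
   */
  static BlockReadRequest parse(VersionAndOpcode versionAndOpcode,
      DataInputStream in) throws IOException {
    ReadBlockHeader header = new ReadBlockHeader(versionAndOpcode);
    header.readFields(in);
    // The block is created with length 0; the real length is resolved
    // from the datanode's replica map later.
    Block block = new Block(header.getBlockId(), 0, header.getGenStamp());
    return new BlockReadRequest(header.getNamespaceId(), block,
        header.getStartOffset(), header.getLen(), header.getClientName());
  }
}

Note that the block length is deliberately left at 0 here; in the snippet the actual length is obtained from the datanode's replica map (getReplicaToRead) after the header is parsed.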

    // in and out will be closed when sock is closed (by the caller)
    DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(NetUtils.getOutputStream(sock, HdfsConstants.WRITE_TIMEOUT)));

    //write the header.
    ReadBlockHeader readBlockHeader = new ReadBlockHeader(
        dataTransferVersion, namespaceId, blockId, genStamp, startOffset, len,
        clientName, reuseConnection, cliData != null);
    readBlockHeader.setReadOptions(options);
    readBlockHeader.writeVersionAndOpCode(out);
    readBlockHeader.write(out);
    out.flush();

    //
    // Get bytes in block, set streams
    //
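The final snippet uses the longer ReadBlockHeader constructor, which additionally carries reuseConnection and a profiling flag, and attaches a ReadOptions object (ioprio class/data and fadvise hints, judging by the datanode-side snippet) via setReadOptions() before serializing. A hedged variant of the earlier helper follows; the wrapper name is illustrative and the ReadOptions import path is an assumption, everything else is the call sequence from the snippet.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.hdfs.ReadOptions; // assumed location; not shown in the snippets
import org.apache.hadoop.hdfs.protocol.ReadBlockHeader;
import org.apache.hadoop.net.NetUtils;

public class ReadBlockRequestWithOptionsSketch {

  /**
   * Same request as before, but using the extended constructor from the
   * last snippet: reuseConnection and shouldProfile travel in the header,
   * and a pre-built ReadOptions object is attached before serialization.
   */
  public static DataOutputStream sendReadRequest(Socket sock,
      int dataTransferVersion, int namespaceId, long blockId, long genStamp,
      long startOffset, long len, String clientName, boolean reuseConnection,
      boolean shouldProfile, ReadOptions options, long writeTimeout)
      throws IOException {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        NetUtils.getOutputStream(sock, writeTimeout)));

    ReadBlockHeader header = new ReadBlockHeader(dataTransferVersion,
        namespaceId, blockId, genStamp, startOffset, len, clientName,
        reuseConnection, shouldProfile);
    header.setReadOptions(options); // ioprio and fadvise hints for the datanode
    header.writeVersionAndOpCode(out);
    header.write(out);
    out.flush();
    return out;
  }
}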


