Examples of BlockOpResponseProto
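
BlockOpResponseProto is the protobuf message a datanode sends back for each data-transfer operation (readBlock, writeBlock, copyBlock, transferBlock, blockChecksum). The snippets below all follow the same pattern: send the operation with a Sender, parse the varint-length-prefixed response off the input stream, and check getStatus() against Status.SUCCESS before touching the payload (checksum info, firstBadLink, and so on).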


Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

        // transfer the RBW (replica being written) to the new datanode
        final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
            oldrbw.getGenerationStamp());
        final BlockOpResponseProto s = DFSTestUtil.transferRbw(
            b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
        Assert.assertEquals(Status.SUCCESS, s.getStatus());
      }

      //check new rbw
      final ReplicaBeingWritten newrbw = getRbw(newnode, bpid);
      LOG.info("newrbw = " + newrbw);
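
In this test, DFSTestUtil.transferRbw drives a replica-being-written transfer between two datanodes over the data-transfer protocol; the returned BlockOpResponseProto exposes the same Status field that production clients check, so the test simply asserts SUCCESS and then verifies the new replica exists on the target node.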

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

          // get block MD5
          new Sender(out).blockChecksum(block, lb.getBlockToken());

          final BlockOpResponseProto reply =
            BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));

          if (reply.getStatus() != Status.SUCCESS) {
            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
              throw new InvalidBlockTokenException();
            } else {
              throw new IOException("Bad response " + reply + " for block "
                  + block + " from datanode " + datanodes[j]);
            }
          }
         
          OpBlockChecksumResponseProto checksumData =
            reply.getChecksumResponse();

          // read bytes-per-CRC (the checksum chunk size) from the response
          final int bpc = checksumData.getBytesPerCrc();
          if (i == 0) { //first block
            bytesPerCRC = bpc;
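
The status check in this snippet recurs nearly verbatim in every client-side example on this page. As a minimal sketch, the pattern can be factored into a helper; the class and method names here (BlockOpResponseCheck, checkBlockOpStatus) are illustrative, not part of the Hadoop source shown:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
    import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;

    final class BlockOpResponseCheck {
      // Centralizes the status check each snippet performs inline after
      // parsing a BlockOpResponseProto off the wire.
      static void checkBlockOpStatus(BlockOpResponseProto reply, String source)
          throws IOException {
        if (reply.getStatus() != Status.SUCCESS) {
          if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
            // A rejected block token gets its own exception type so callers
            // can fetch a fresh token and retry against the same datanode.
            throw new InvalidBlockTokenException();
          }
          throw new IOException("Bad response " + reply + " from " + source);
        }
      }
    }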

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
          HdfsConstants.SMALL_BUFFER_SIZE));
      DataInputStream in = new DataInputStream(pair.in);
 
      // request a single byte at offset 0 just to obtain the checksum type in the reply
      new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
      final BlockOpResponseProto reply =
          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
     
      if (reply.getStatus() != Status.SUCCESS) {
        if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
          throw new InvalidBlockTokenException();
        } else {
          throw new IOException("Bad response " + reply + " trying to read "
              + lb.getBlock() + " from datanode " + dn);
        }
      }
     
      return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
    } finally {
      IOUtils.cleanup(null, pair.in, pair.out);
    }
  }
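
Note the trick here: the client issues a readBlock for one byte purely to receive the response header, pulls the checksum type out of getReadOpChecksumInfo(), and closes both streams in the finally block without consuming any block data.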

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

          mirrorOut.flush();

          // read connect ack (only for clients, not for replication req)
          if (isClient) {
            BlockOpResponseProto connectAck =
              BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(mirrorIn));
            mirrorInStatus = connectAck.getStatus();
            firstBadLink = connectAck.getFirstBadLink();
            if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
              LOG.info("Datanode " + targets.length +
                       " got response for connect ack " +
                       " from downstream datanode with firstbadlink as " +
                       firstBadLink);
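
This is the datanode side of pipeline setup: each node forwards the write downstream, waits for the downstream connect ack, and records both the status and firstBadLink so a failure anywhere in the chain can be reported back upstream to the client (the writeBlock snippet further down consumes exactly these two fields).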

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

      /* send request to the proxy */
      new Sender(proxyOut).copyBlock(block, blockToken);

      // receive the response from the proxy
     
      BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
          HdfsProtoUtil.vintPrefixed(proxyReply));

      if (copyResponse.getStatus() != SUCCESS) {
        if (copyResponse.getStatus() == ERROR_ACCESS_TOKEN) {
          throw new IOException("Copy block " + block + " from "
              + proxySock.getRemoteSocketAddress()
              + " failed due to access token error");
        }
        throw new IOException("Copy block " + block + " from "
            + proxySock.getRemoteSocketAddress() + " failed");
      }
     
      // get checksum info about the block we're copying
      ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
      DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(
          checksumInfo.getChecksum());
      // open a block receiver and check if the block does not exist
      blockReceiver = new BlockReceiver(
          block, proxyReply, proxySock.getRemoteSocketAddress().toString(),
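
During block replacement the receiving datanode acts as a client toward the proxy datanode: it sends copyBlock, validates the response status, then constructs a BlockReceiver directly on the proxy's reply stream, using the DataChecksum advertised in getReadOpChecksumInfo() so the incoming data is verified with the sender's own checksum parameters.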

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

    ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
      .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
      .setChunkOffset(blockSender.getOffset())
      .build();
     
    BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
      .setStatus(SUCCESS)
      .setReadOpChecksumInfo(ckInfo)
      .build();
    response.writeDelimitedTo(out);
    out.flush();
  }
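
The snippet above is the server-side success path: build the message, writeDelimitedTo (which emits the same varint length prefix the clients parse), and flush. A hedged sketch of the complementary error ack, with an illustrative class and method name:

    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    class AckUtil {
      // Sketch: report a pipeline failure upstream. firstBadLink names the
      // datanode that failed; the writeBlock snippet below reads this field
      // to decide where the pipeline broke.
      static void sendErrorAck(DataOutputStream out, String badNode)
          throws IOException {
        BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
            .setStatus(Status.ERROR)
            .setFirstBadLink(badNode) // empty string means "no bad link"
            .build();
        response.writeDelimitedTo(out); // varint length prefix + message bytes
        out.flush();
      }
    }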

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
            targets);
        out.flush();

        //ack
        BlockOpResponseProto response =
          BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));
        if (SUCCESS != response.getStatus()) {
          throw new IOException("Failed to add a datanode");
        }
      } finally {
        IOUtils.closeStream(in);
        IOUtils.closeStream(out);
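
transferBlock is how a client asks an existing pipeline datanode to copy a replica to newly added nodes (the replace-datanode-on-failure path); the ack is again a BlockOpResponseProto, and anything other than SUCCESS aborts the attempt to add the datanode.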

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

          new Sender(out).writeBlock(block, accessToken, dfsClient.clientName,
              nodes, null, recoveryFlag? stage.getRecoveryStage() : stage,
              nodes.length, block.getNumBytes(), bytesSent, newGS, checksum);
 
          // receive ack for connect
          BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
              HdfsProtoUtil.vintPrefixed(blockReplyStream));
          pipelineStatus = resp.getStatus();
          firstBadLink = resp.getFirstBadLink();
         
          if (pipelineStatus != SUCCESS) {
            if (pipelineStatus == Status.ERROR_ACCESS_TOKEN) {
              throw new InvalidBlockTokenException(
                  "Got access token error for connect ack with firstBadLink as "
                  + firstBadLink);
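
This is the client end of the pipeline handshake shown from the datanode side earlier: after sending writeBlock, the client blocks on the connect ack, and on ERROR_ACCESS_TOKEN it throws InvalidBlockTokenException (so the block token can be refreshed) with firstBadLink identifying the failing node.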

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

    //
    // Get bytes in block
    //
    DataInputStream in = new DataInputStream(ioStreams.in);

    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
        vintPrefixed(in));
    checkSuccess(status, sock, block, file);
    ReadOpChecksumInfoProto checksumInfo =
      status.getReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.fromProto(
        checksumInfo.getChecksum());
    //Warning when we get CHECKSUM_NULL?

    // Read the first chunk offset.

Examples of org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto

    DataInputStream in = new DataInputStream(
        new BufferedInputStream(NetUtils.getInputStream(sock),
                                bufferSize));
   
    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
        vintPrefixed(in));
    RemoteBlockReader2.checkSuccess(status, sock, block, file);
    ReadOpChecksumInfoProto checksumInfo =
      status.getReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.fromProto(
        checksumInfo.getChecksum());
    //Warning when we get CHECKSUM_NULL?
   
    // Read the first chunk offset.
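
Every parse in these examples wraps the stream in vintPrefixed(in) before calling parseFrom. As a self-contained sketch, protobuf's parseDelimitedFrom applies the same varint length framing, so one response can be read without the HDFS helper (the readResponse method name is illustrative):

    import java.io.DataInputStream;
    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;

    class ResponseReader {
      // Sketch: read one varint-length-prefixed BlockOpResponseProto,
      // mirroring BlockOpResponseProto.parseFrom(vintPrefixed(in)).
      static BlockOpResponseProto readResponse(DataInputStream in)
          throws IOException {
        BlockOpResponseProto reply = BlockOpResponseProto.parseDelimitedFrom(in);
        if (reply == null) { // null signals end of stream before any bytes
          throw new IOException("Premature EOF while reading block op response");
        }
        return reply;
      }
    }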