Package: org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.WriteBlockHeader


        DatanodeInfo srcNode = new DatanodeInfo(getDNRegistrationForNS(srcNamespaceId));

        //
        // Header info
        //
        WriteBlockHeader header = new WriteBlockHeader(
            DataTransferProtocol.DATA_TRANSFER_VERSION, dstNamespaceId,
            destinationBlock.getBlockId(),
            destinationBlock.getGenerationStamp(), 0, false, true, srcNode,
            targets.length - 1, targets, "");
        header.writeVersionAndOpCode(out);
        header.write(out);

        // send data & checksum
        blockSender.sendBlock(out, baseStream, null);

        // no response necessary
View Full Code Here


    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
   
    WriteBlockHeader headerToReceive = new WriteBlockHeader(
        versionAndOpcode);
    headerToReceive.readFields(in);
    int namespaceid = headerToReceive.getNamespaceId();
    Block block = new Block(headerToReceive.getBlockId(),
        dataXceiverServer.estimateBlockSize, headerToReceive.getGenStamp());
    LOG.info("Receiving block " + block +
             " src: " + remoteAddress +
             " dest: " + localAddress);
    int pipelineSize = headerToReceive.getPipelineDepth(); // num of datanodes in entire pipeline
    boolean isRecovery = headerToReceive.isRecoveryFlag(); // is this part of recovery?
    String client = headerToReceive.getClientName(); // working on behalf of this client
    boolean hasSrcDataNode = headerToReceive.isHasSrcDataNode(); // is src node info present
    if (hasSrcDataNode) {
      srcDataNode = headerToReceive.getSrcDataNode();
    }
    int numTargets = headerToReceive.getNumTargets();
    DatanodeInfo targets[] = headerToReceive.getNodes();

    DataOutputStream mirrorOut = null// stream to next target
    DataInputStream mirrorIn = null;    // reply from next target
    DataOutputStream replyOut = null;   // stream to prev target
    Socket mirrorSock = null;           // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null;           // the name:port of next target
    String firstBadLink = "";           // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    try {
      // open a block receiver and check if the block does not exist
      blockReceiver = new BlockReceiver(namespaceid, block, in,
          s.getRemoteSocketAddress().toString(),
          s.getLocalSocketAddress().toString(),
          isRecovery, client, srcDataNode, datanode);

      // get a connection back to the previous target
      replyOut = new DataOutputStream(new BufferedOutputStream(
                     NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
                     SMALL_BUFFER_SIZE));

      //
      // Open network conn to backup machine, if
      // appropriate
      //
      if (targets.length > 0) {
        InetSocketAddress mirrorTarget = null;
        // Connect to backup machine
        mirrorNode = targets[0].getName();
        mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
        mirrorSock = datanode.newSocket();
        try {
          int timeoutValue = datanode.socketTimeout +
                             (datanode.socketReadExtentionTimeout * numTargets);
          int writeTimeout = datanode.socketWriteTimeout +
                             (datanode.socketWriteExtentionTimeout * numTargets);
          NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
          mirrorSock.setSoTimeout(timeoutValue);
          mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
          mirrorOut = new DataOutputStream(
             new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                         SMALL_BUFFER_SIZE));
          mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

          // Write header: Copied from DFSClient.java!
          WriteBlockHeader headerToSend = new WriteBlockHeader(
              DataTransferProtocol.DATA_TRANSFER_VERSION, namespaceid,
              block.getBlockId(), block.getGenerationStamp(), pipelineSize,
              isRecovery, hasSrcDataNode, srcDataNode, targets.length - 1, targets,
              client);
          headerToSend.writeVersionAndOpCode(mirrorOut);
          headerToSend.write(mirrorOut);
          blockReceiver.writeChecksumHeader(mirrorOut);
          mirrorOut.flush();

          // read connect ack (only for clients, not for replication req)
          if (client.length() != 0) {
View Full Code Here

        tmpOut[curNode] = out;
        DataInputStream brs = new DataInputStream(NetUtils.getInputStream(s));
        replyIn[curNode] = brs;

        int version = dfsClient.getDataTransferProtocolVersion();
        WriteBlockHeader header = new WriteBlockHeader(version,
            namespaceId, block.getBlockId(), block.getGenerationStamp(),
            pipelineDepth, recoveryFlag, false, null, pipelineDepth - 1,
            nodes, client);
        header.writeVersionAndOpCode(out);
        header.write(out);
        checksum.writeHeader(out);
        out.flush();

        // receive ack for connect
        firstBadLink = Text.readString(brs);
View Full Code Here

    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
   
    WriteBlockHeader headerToReceive = new WriteBlockHeader(
        versionAndOpcode);
    headerToReceive.readFields(in);
    int namespaceid = headerToReceive.getNamespaceId();
    Block block = new Block(headerToReceive.getBlockId(),
        dataXceiverServer.estimateBlockSize, headerToReceive.getGenStamp());
    LOG.info("Receiving block " + block +
             " src: " + remoteAddress +
             " dest: " + localAddress);
    int pipelineSize = headerToReceive.getPipelineDepth(); // num of datanodes in entire pipeline
    boolean isRecovery = headerToReceive.isRecoveryFlag(); // is this part of recovery?
    String client = headerToReceive.getClientName(); // working on behalf of this client
    boolean hasSrcDataNode = headerToReceive.isHasSrcDataNode(); // is src node info present
    if (hasSrcDataNode) {
      srcDataNode = headerToReceive.getSrcDataNode();
    }
    int numTargets = headerToReceive.getNumTargets();
    DatanodeInfo targets[] = headerToReceive.getNodes();

    DataOutputStream mirrorOut = null// stream to next target
    DataInputStream mirrorIn = null;    // reply from next target
    DataOutputStream replyOut = null;   // stream to prev target
    Socket mirrorSock = null;           // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null;           // the name:port of next target
    String firstBadLink = "";           // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    try {
      // open a block receiver and check if the block does not exist
      blockReceiver = new BlockReceiver(namespaceid, block, in,
          s.getRemoteSocketAddress().toString(),
          s.getLocalSocketAddress().toString(),
          isRecovery, client, srcDataNode, datanode);

      // get a connection back to the previous target
      replyOut = new DataOutputStream(new BufferedOutputStream(
                     NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
                     SMALL_BUFFER_SIZE));

      //
      // Open network conn to backup machine, if
      // appropriate
      //
      if (targets.length > 0) {
        InetSocketAddress mirrorTarget = null;
        // Connect to backup machine
        mirrorNode = targets[0].getName();
        mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
        mirrorSock = datanode.newSocket();
        try {
          int timeoutValue = datanode.socketTimeout +
                             (datanode.socketReadExtentionTimeout * numTargets);
          int writeTimeout = datanode.socketWriteTimeout +
                             (datanode.socketWriteExtentionTimeout * numTargets);
          NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
          mirrorSock.setSoTimeout(timeoutValue);
          mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
          mirrorOut = new DataOutputStream(
             new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                         SMALL_BUFFER_SIZE));
          mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

          // Write header: Copied from DFSClient.java!
          WriteBlockHeader headerToSend = new WriteBlockHeader(
              DataTransferProtocol.DATA_TRANSFER_VERSION, namespaceid,
              block.getBlockId(), block.getGenerationStamp(), pipelineSize,
              isRecovery, hasSrcDataNode, srcDataNode, targets.length - 1, targets,
              client);
          headerToSend.writeVersionAndOpCode(mirrorOut);
          headerToSend.write(mirrorOut);
          blockReceiver.writeChecksumHeader(mirrorOut);
          mirrorOut.flush();

          // read connect ack (only for clients, not for replication req)
          if (client.length() != 0) {
View Full Code Here

        DatanodeInfo srcNode = new DatanodeInfo(getDNRegistrationForNS(srcNamespaceId));

        //
        // Header info
        //
        WriteBlockHeader header = new WriteBlockHeader(
            DataTransferProtocol.DATA_TRANSFER_VERSION, dstNamespaceId,
            destinationBlock.getBlockId(),
            destinationBlock.getGenerationStamp(), 0, false, true, srcNode,
            targets.length - 1, targets, "");
        header.writeVersionAndOpCode(out);
        header.write(out);

        // send data & checksum
        blockSender.sendBlock(out, baseStream, null);

        // no response necessary
View Full Code Here

          tmpOut[curNode] = out;
          DataInputStream brs = new DataInputStream(NetUtils.getInputStream(s));
          replyIn[curNode] = brs;

          int version = getDataTransferProtocolVersion();
          WriteBlockHeader header = new WriteBlockHeader(version,
              namespaceId, block.getBlockId(), block.getGenerationStamp(),
              pipelineDepth, recoveryFlag, false, null, pipelineDepth - 1,
              nodes, client);
          header.writeVersionAndOpCode(out);
          header.write(out);
          checksum.writeHeader(out);
          out.flush();

          // receive ack for connect
          firstBadLink = Text.readString(brs);
View Full Code Here

              return BlockDataFile.getDummyDataFileFromFileChannel(
                  blockContents.getChannel()).getReader(null);
            }
          });

      WriteBlockHeader header = new WriteBlockHeader(new VersionAndOpcode(
          dataTransferVersion, DataTransferProtocol.OP_WRITE_BLOCK));
      header.set(namespaceId, block.getBlockId(), block.getGenerationStamp(),
          0, false, true, new DatanodeInfo(), 0, null, "");
      header.writeVersionAndOpCode(out);
      header.write(out);
      blockSender.sendBlock(out, baseStream, null, progress);

      LOG.info("Sent block " + block + " to " + datanode);
    } finally {
      sock.close();
View Full Code Here

    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
   
    WriteBlockHeader headerToReceive = new WriteBlockHeader(
        versionAndOpcode);
    headerToReceive.readFields(in);

    WriteOptions options = headerToReceive.getWritePipelineInfo()
        .getWriteOptions();
    boolean ioprioEnabled = !options.isIoprioDisabled();
    if (ioprioEnabled) {
      NativeIO.ioprioSetIfPossible(options.getIoprioClass(),
          options.getIoprioData());
    }

    int namespaceid = headerToReceive.getNamespaceId();
    Block block = new Block(headerToReceive.getBlockId(),
        dataXceiverServer.estimateBlockSize, headerToReceive.getGenStamp());
    if (LOG.isInfoEnabled()) {
      if (remoteAddress == null) {
        getAddresses();
      }
      LOG.info("Receiving block " + block +
               " src: " + remoteAddress +
               " dest: " + localAddress);
    }
    int pipelineSize = headerToReceive.getPipelineDepth(); // num of datanodes in entire pipeline
    boolean isRecovery = headerToReceive.isRecoveryFlag(); // is this part of recovery?
    String client = headerToReceive.getWritePipelineInfo().getClientName(); // working on behalf of this client
    boolean hasSrcDataNode = headerToReceive.getWritePipelineInfo()
        .hasSrcDataNode(); // is src node info present
    if (hasSrcDataNode) {
      srcDataNode = headerToReceive.getWritePipelineInfo().getSrcDataNode();
    }
    int numTargets = headerToReceive.getWritePipelineInfo().getNumTargets();
    DatanodeInfo targets[] = headerToReceive.getWritePipelineInfo().getNodes();
    int fadvise = headerToReceive.getWritePipelineInfo().getWriteOptions()
        .getFadvise();

    DataOutputStream mirrorOut = null// stream to next target
    DataInputStream mirrorIn = null;    // reply from next target
    DataOutputStream replyOut = null;   // stream to prev target
    Socket mirrorSock = null;           // socket to next target
    BlockReceiver blockReceiver = null; // responsible for data handling
    String mirrorNode = null;           // the name:port of next target
    String firstBadLink = "";           // first datanode that failed in connection setup

    updateCurrentThreadName("receiving block " + block + " client=" + client);
    InjectionHandler.processEvent(InjectionEvent.DATANODE_WRITE_BLOCK);
    try {
      boolean ifProfileEnabled = headerToReceive.getWritePipelineInfo()
          .getWriteOptions().ifProfileEnabled();
      boolean isSecondary = (targets.length + 1 != pipelineSize);
      // open a block receiver and check if the block does not exist
      blockReceiver = new BlockReceiver(namespaceid, block, block, in,
          s.getRemoteSocketAddress().toString(),
          s.getLocalSocketAddress().toString(),
          isRecovery, client, srcDataNode, datanode, isSecondary, fadvise,
          ifProfileEnabled, versionAndOpcode.getDataTransferVersion() >=
            DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION,
            options.getSyncFileRange());

      // get a connection back to the previous target
      replyOut = new DataOutputStream(new BufferedOutputStream(
                     NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
                     SMALL_BUFFER_SIZE));

      //
      // Open network conn to backup machine, if
      // appropriate
      //
      if (targets.length > 0) {
        InetSocketAddress mirrorTarget = null;
        // Connect to backup machine
        mirrorNode = targets[0].getName();
        mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
        mirrorSock = datanode.newSocket();
        try {
          int timeoutValue = datanode.socketTimeout +
                             (datanode.socketReadExtentionTimeout * numTargets);
          int writeTimeout = datanode.socketWriteTimeout +
                             (datanode.socketWriteExtentionTimeout * numTargets);
          NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
          mirrorSock.setSoTimeout(timeoutValue);
          mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
          mirrorOut = new DataOutputStream(
             new BufferedOutputStream(
                         NetUtils.getOutputStream(mirrorSock, writeTimeout),
                         SMALL_BUFFER_SIZE));
          mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));

          // Write header: Copied from DFSClient.java!
          WriteBlockHeader headerToSend = new WriteBlockHeader(
              versionAndOpcode.getDataTransferVersion(), namespaceid,
              block.getBlockId(), block.getGenerationStamp(), pipelineSize,
              isRecovery, hasSrcDataNode, srcDataNode, targets.length - 1, targets,
              client);
          headerToSend.getWritePipelineInfo().setWriteOptions(options);
          headerToSend.writeVersionAndOpCode(mirrorOut);
          headerToSend.write(mirrorOut);
          blockReceiver.writeChecksumHeader(mirrorOut);
          mirrorOut.flush();

          // read connect ack (only for clients, not for replication req)
          if (client.length() != 0) {
View Full Code Here

        DatanodeInfo srcNode = new DatanodeInfo(getDNRegistrationForNS(srcNamespaceId));

        //
        // Header info
        //
        WriteBlockHeader header = new WriteBlockHeader(
            dataTransferVersion, dstNamespaceId,
            destinationBlock.getBlockId(),
            destinationBlock.getGenerationStamp(), 0, false, true, srcNode,
            targets.length - 1, targets, "");
        header.writeVersionAndOpCode(out);
        header.write(out);

        // send data & checksum
        DataTransferThrottler trottler = null;
        if (dataTransferMaxRate > 0) {
          trottler = new DataTransferThrottler(dataTransferMaxRate);
View Full Code Here

        }

        int version = dfsClient.getDataTransferProtocolVersion();
        // write the header
        if (!appendFlag) {
          WriteBlockHeader header = new WriteBlockHeader(version,
              dfsClient.namespaceId, block.getBlockId(), block.getGenerationStamp(),
              pipelineDepth, recoveryFlag, false, null, pipelineDepth - 1,
              nodes, client);
          header.getWritePipelineInfo().setWriteOptions(options);
          header.getWritePipelineInfo().getWriteOptions()
              .setIfProfileEnabled(profileData != null);
          header.writeVersionAndOpCode(out);
          header.write(out);
        } else {
          AppendBlockHeader header = new AppendBlockHeader(version,
              dfsClient.namespaceId, block.getBlockId(), block.getNumBytes(),
              block.getGenerationStamp(),
              pipelineDepth, false, null, pipelineDepth - 1,
              nodes, client);
          header.writeVersionAndOpCode(out);
          header.write(out);
        }
        checksum.writeHeader(out);
        out.flush();

        // receive ack for connect
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.protocol.WriteBlockHeader

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.