Package: org.apache.hadoop.hdfs.protocol

Usage examples of org.apache.hadoop.hdfs.protocol.VersionAndOpcode


      // Widen the read timeout to 5x the configured socket timeout while
      // waiting for the initial request header.
      s.setSoTimeout(datanode.socketTimeout*5);

      in = new DataInputStream(
          new BufferedInputStream(NetUtils.getInputStream(s),
                                  SMALL_BUFFER_SIZE));
      // Every data-transfer request is prefixed by (protocol version, opcode);
      // read that pair first so we know how to dispatch.
      VersionAndOpcode versionAndOpcode = new VersionAndOpcode();
      versionAndOpcode.readFields(in);
      op = versionAndOpcode.getOpCode();
     
      // Client is "local" when its address equals this socket's local address,
      // i.e. it runs on the same host as this datanode.
      boolean local = s.getInetAddress().equals(s.getLocalAddress());
      updateCurrentThreadName("waiting for operation");
     
      // Make sure the xceiver (data transceiver) count is not exceeded
View Full Code Here


      // Widen the read timeout to 5x the configured socket timeout while
      // waiting for the initial request header.
      s.setSoTimeout(datanode.socketTimeout*5);

      in = new DataInputStream(
          new BufferedInputStream(NetUtils.getInputStream(s),
                                  SMALL_BUFFER_SIZE));
      // Every data-transfer request is prefixed by (protocol version, opcode);
      // read that pair first so we know how to dispatch.
      VersionAndOpcode versionAndOpcode = new VersionAndOpcode();
      versionAndOpcode.readFields(in);
      op = versionAndOpcode.getOpCode();
     
      // Client is "local" when its address equals this socket's local address,
      // i.e. it runs on the same host as this datanode.
      boolean local = s.getInetAddress().equals(s.getLocalAddress());
      updateCurrentThreadName("waiting for operation");
     
      // Make sure the xceiver (data transceiver) count is not exceeded
View Full Code Here

              return BlockDataFile.getDummyDataFileFromFileChannel(
                  blockContents.getChannel()).getReader(null);
            }
          });

      // Build an OP_WRITE_BLOCK request header carrying the negotiated
      // protocol version and opcode.
      WriteBlockHeader header = new WriteBlockHeader(new VersionAndOpcode(
          dataTransferVersion, DataTransferProtocol.OP_WRITE_BLOCK));
      // Populate with the block's identity (namespace, block id, generation
      // stamp) plus additional write parameters.
      // NOTE(review): meanings of the literal arguments (0, false, true, ...)
      // are not visible here — confirm against WriteBlockHeader.set().
      header.set(namespaceId, block.getBlockId(), block.getGenerationStamp(),
          0, false, true, new DatanodeInfo(), 0, null, "");
      // Wire format: version+opcode first, then the header fields.
      header.writeVersionAndOpCode(out);
      header.write(out);
View Full Code Here

    /**
     * Send a block replace request to the output stream.
     * Writes the protocol version and OP_REPLACE_BLOCK opcode first,
     * then the header fields: namespace id, block id, generation stamp,
     * source storage id, and the proxy source datanode.
     *
     * @param out stream to the target datanode
     * @throws IOException if writing to the stream fails
     */
    private void sendRequest(DataOutputStream out) throws IOException {
      ReplaceBlockHeader header = new ReplaceBlockHeader(new VersionAndOpcode(
          dataTransferProtocolVersion, DataTransferProtocol.OP_REPLACE_BLOCK));
      header.set(namespaceId, block.getBlock().getBlockId(), block.getBlock()
          .getGenerationStamp(), source.getStorageID(), proxySource);
      header.writeVersionAndOpCode(out);
      header.write(out);
View Full Code Here

     
      // Request-dispatch loop: read one (version, opcode) header per request
      // and dispatch to the matching handler.  Iterates more than once only
      // when the connection is being reused (see the break near the bottom).
      do {
        long startNanoTime = System.nanoTime();
        long startTime = DataNode.now();
       
        VersionAndOpcode versionAndOpcode = new VersionAndOpcode();
       
        try {
          // After the first op, wait for the next request only for the
          // keepalive window rather than the standard timeout.
          if (opsProcessed != 0) {
            assert socketKeepaliveTimeout > 0;
            s.setSoTimeout(socketKeepaliveTimeout);
          }
          versionAndOpcode.readFields(in);
          op = versionAndOpcode.getOpCode();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + s.toString() + " closing after " + opsProcessed + " ops");
            }
          } else {
            // Error on the first op, or an unexpected error type: propagate.
            throw err;
          }
          break;
        }
       
        // restore normal timeout now that a request header has arrived.
        if (opsProcessed != 0) {
          s.setSoTimeout(stdTimeout);
        }

        // "local" = client runs on this host (remote address equals the
        // socket's local address); used to split local/remote metrics below.
        boolean local = s.getInetAddress().equals(s.getLocalAddress());
        updateCurrentThreadName("waiting for operation");

        // Make sure the xceiver (data transceiver) count is not exceeded
        int curXceiverCount = datanode.getXceiverCount();
        if (curXceiverCount > dataXceiverServer.maxXceiverCount) {
          datanode.myMetrics.xceiverCountExceeded.inc();
          throw new IOException("xceiverCount " + curXceiverCount
              + " exceeds the limit of concurrent xcievers "
              + dataXceiverServer.maxXceiverCount);
        }
       
        // Dispatch on the opcode.  Each handler reads the remainder of its
        // request from 'in'; afterwards the matching latency metric is
        // updated with the wall-clock time the op took.
        switch ( op ) {
        case DataTransferProtocol.OP_READ_BLOCK:
          readBlock( in, versionAndOpcode , startNanoTime);
          datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
          if (local)
            datanode.myMetrics.readsFromLocalClient.inc();
          else
            datanode.myMetrics.readsFromRemoteClient.inc();
          break;
        case DataTransferProtocol.OP_READ_BLOCK_ACCELERATOR:
          readBlockAccelerator(in, versionAndOpcode);
          datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
          if (local)
            datanode.myMetrics.readsFromLocalClient.inc();
          else
            datanode.myMetrics.readsFromRemoteClient.inc();
          break;
        case DataTransferProtocol.OP_WRITE_BLOCK:
          writeBlock( in, versionAndOpcode );
          datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime);
          if (local)
            datanode.myMetrics.writesFromLocalClient.inc();
          else
            datanode.myMetrics.writesFromRemoteClient.inc();
          break;
        case DataTransferProtocol.OP_READ_METADATA:
          readMetadata(in, versionAndOpcode);
          datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime);
          break;
        case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
          replaceBlock(in, versionAndOpcode);
          datanode.myMetrics.replaceBlockOp.inc(DataNode.now() - startTime);
          break;
        case DataTransferProtocol.OP_COPY_BLOCK:
          // for balancing purpose; send to a proxy source
          copyBlock(in, versionAndOpcode);
          datanode.myMetrics.copyBlockOp.inc(DataNode.now() - startTime);
          break;
        case DataTransferProtocol.OP_BLOCK_CHECKSUM: //get the checksum of a block
          getBlockChecksum(in, versionAndOpcode);
          datanode.myMetrics.blockChecksumOp.inc(DataNode.now() - startTime);
          break;
        case DataTransferProtocol.OP_BLOCK_CRC: //get the CRC of a block
          getBlockCrc(in, versionAndOpcode);
          // NOTE(review): reuses the blockChecksumOp metric for CRC ops —
          // possibly intentional, but confirm a separate metric isn't wanted.
          datanode.myMetrics.blockChecksumOp.inc(DataNode.now() - startTime);
          break;
        case DataTransferProtocol.OP_APPEND_BLOCK:
          appendBlock(in, versionAndOpcode);
          datanode.myMetrics.appendBlockOp.inc(DataNode.now() - startTime);
          if (local)
            datanode.myMetrics.writesFromLocalClient.inc();
          else
            datanode.myMetrics.writesFromRemoteClient.inc();
          break;
        default:
          throw new IOException("Unknown opcode " + op + " in data stream");
        }
       
        ++ opsProcessed;
       
        // Stop looping if the client's protocol version predates connection
        // reuse, or if reuse has been disabled.
        if (versionAndOpcode.getDataTransferVersion() <
            DataTransferProtocol.READ_REUSE_CONNECTION_VERSION ||
            !reuseConnection) {
          break;
        }
      } while (s.isConnected() && socketKeepaliveTimeout > 0);
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.protocol.VersionAndOpcode

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.