Package org.apache.hadoop.hdfs.client

Examples of org.apache.hadoop.hdfs.client.ClientMmap
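ClientMmap wraps a memory-mapped region of a block replica stored on the local machine. The HDFS client hands these mappings out on the zero-copy read path: the examples below show DFSInputStream and BlockReaderLocal obtaining a ClientMmap, exposing part of it as a read-only ByteBuffer, and tracking it so the mapping can be released once the caller is done with the buffer. Applications normally reach this path through FSDataInputStream's enhanced byte-buffer read API rather than by using ClientMmap directly. A minimal caller-side sketch (the file path and read size are illustrative, and short-circuit local reads must be enabled for the zero-copy path to kick in):

import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.io.ElasticByteBufferPool;

public class ZeroCopyReadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Fallback pool, used when an mmap-backed (zero-copy) read is not possible.
    ElasticByteBufferPool pool = new ElasticByteBufferPool();

    try (FSDataInputStream in = fs.open(new Path("/tmp/example.dat"))) {
      // Ask for up to 4 MB; the returned buffer may be shorter, or null at
      // end of file.
      ByteBuffer buf = in.read(pool, 4 * 1024 * 1024,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf != null) {
        try {
          System.out.println("read " + buf.remaining() + " bytes");
        } finally {
          // Releases the underlying ClientMmap reference (or returns a pool
          // buffer if the read fell back to a copying path).
          in.releaseBuffer(buf);
        }
      }
    }
  }
}

If the mmap-backed read is not possible, the same call falls back to an ordinary copying read into a buffer taken from the pool; ClientMmap is only involved when the zero-copy path succeeds.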


The first example appears to come from DFSInputStream's zero-copy read path in an older client, where the mapping is obtained through a ClientMmapManager and reference-counted explicitly:

    // (excerpt begins mid-method; the preceding lines compute blockLeft, the
    //  number of bytes remaining in the current block)
    // Clamp the read to what is left in the current block, then translate the
    // stream position into an offset within the mmapped block.
    int length = Math.min((int)blockLeft, maxLength);
    long blockStartInFile = currentLocatedBlock.getStartOffset();
    long blockPos = curPos - blockStartInFile;
    long limit = blockPos + length;
    ClientMmap clientMmap =
        blockReader.getClientMmap(opts, dfsClient.getMmapManager());
    if (clientMmap == null) {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("unable to perform a zero-copy read from offset " +
          curPos + " of " + src + "; BlockReader#getClientMmap returned " +
          "null.");
      }
      return null;
    }
    // Advance the stream past the bytes the caller will consume, then expose
    // the relevant slice of the mapping as a read-only buffer.
    seek(pos + length);
    ByteBuffer buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
    buffer.position((int)blockPos);
    buffer.limit((int)limit);
    // Take a reference so the mapping stays valid while the caller holds the
    // buffer; extendedReadBuffers records which mapping backs which buffer.
    clientMmap.ref();
    extendedReadBuffers.put(buffer, clientMmap);
    readStatistics.addZeroCopyBytes(length);
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("readZeroCopy read " + maxLength + " bytes from " +
          "offset " + curPos + " via the zero-copy read path.");
    }
    // ... (rest of the method, which returns the buffer, is omitted in this
    //  excerpt)
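The clientMmap.ref() call above keeps the mapping alive while the caller still holds the returned buffer; releaseBuffer() later looks the buffer up in extendedReadBuffers and drops that reference. A minimal sketch of the reference-counting pattern (RefCountedMmap and its fields are illustrative simplifications, not the real ClientMmap, which also cooperates with the client's mmap cache):

import java.nio.MappedByteBuffer;
import java.util.concurrent.atomic.AtomicInteger;

class RefCountedMmap {
  private final MappedByteBuffer map;                    // mapping of the block file
  private final AtomicInteger refCount = new AtomicInteger(1);

  RefCountedMmap(MappedByteBuffer map) {
    this.map = map;
  }

  MappedByteBuffer getMappedByteBuffer() {
    return map;
  }

  // Taken once for every zero-copy buffer handed out to a caller.
  void ref() {
    refCount.incrementAndGet();
  }

  // Dropped when the caller releases the buffer; the final release makes the
  // mapping eligible for unmapping or eviction from the mmap cache.
  void unref() {
    if (refCount.decrementAndGet() == 0) {
      // at this point the mapping can be unmapped or returned to the cache
    }
  }
}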


            " to " + length + " to avoid 31-bit limit.  " +
            "blockPos=" + blockPos + "; curPos=" + curPos +
            "; curEnd=" + curEnd);
      }
    }
    final ClientMmap clientMmap = blockReader.getClientMmap(opts);
    if (clientMmap == null) {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("unable to perform a zero-copy read from offset " +
          curPos + " of " + src + "; BlockReader#getClientMmap returned " +
          "null.");
      }
      return null;
    }
    boolean success = false;
    ByteBuffer buffer;
    try {
      // Advance the stream, then expose the requested slice of the mapping as
      // a read-only buffer.
      seek(curPos + length);
      buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
      buffer.position((int)blockPos);
      buffer.limit((int)(blockPos + length));
      // Record the buffer so a later releaseBuffer() call can find and release
      // the underlying ClientMmap; no explicit ref() call appears in this
      // version.
      extendedReadBuffers.put(buffer, clientMmap);
      readStatistics.addZeroCopyBytes(length);
      if (DFSClient.LOG.isDebugEnabled()) {
        // ... (debug logging and the rest of the method, including the
        //  success handling, are omitted in this excerpt)
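Compared with the first example, this version does not take an explicit reference on the mapping: the ClientMmap handed back by getClientMmap(opts) already accounts for this consumer, and recording it in extendedReadBuffers is what lets releaseBuffer() release it later. The success flag suggests that the mapping is cleaned up if the read fails before the buffer reaches the caller.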

              "we aren't skipping checksums, and the block is not mlocked.");
        }
        return null;
      }
    }
    ClientMmap clientMmap = null;
    try {
      // Ask the short-circuit replica for its mmap, creating it on first use.
      // 'anchor' is true when checksums are only being skipped because the
      // block is mlocked, so the replica must stay anchored in the cache.
      clientMmap = replica.getOrCreateClientMmap(anchor);
    } finally {
      if ((clientMmap == null) && anchor) {
        // The mapping could not be produced, so give back the no-checksum
        // context acquired earlier.
        releaseNoChecksumContext();
      }
    }
    return clientMmap;
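Whether the mlock condition in this last example is satisfied depends on cluster configuration: zero-copy reads require short-circuit local reads, and a zero-copy read that does not pass SKIP_CHECKSUMS additionally requires the block to be cached (mlocked) on the DataNode. A sketch of the client-side settings involved, assuming typical key names from the HDFS documentation (the socket path and cache size are illustrative, and the DataNode has to be configured to match):

import org.apache.hadoop.conf.Configuration;

public class ZeroCopyClientConf {
  // Illustrative client-side configuration for the short-circuit / mmap read
  // path; DataNode-side settings and "hdfs cacheadmin" directives (which
  // drive block mlocking) are separate and not shown here.
  public static Configuration create() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    conf.set("dfs.domain.socket.path", "/var/lib/hadoop-hdfs/dn_socket");
    conf.setInt("dfs.client.mmap.cache.size", 256);
    return conf;
  }
}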

            " to " + length + " to avoid 31-bit limit.  " +
            "blockPos=" + blockPos + "; curPos=" + curPos +
            "; curEnd=" + curEnd);
      }
    }
    final ClientMmap clientMmap = blockReader.getClientMmap(opts);
    if (clientMmap == null) {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("unable to perform a zero-copy read from offset " +
          curPos + " of " + src + "; BlockReader#getClientMmap returned " +
          "null.");
      }
      return null;
    }
    boolean success = false;
    ByteBuffer buffer;
    try {
      seek(curPos + length);
      buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
      buffer.position((int)blockPos);
      buffer.limit((int)(blockPos + length));
      extendedReadBuffers.put(buffer, clientMmap);
      readStatistics.addZeroCopyBytes(length);
      if (DFSClient.LOG.isDebugEnabled()) {
View Full Code Here
