Examples of READ3Response


Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response
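All of the handler variants below are drawn from the Hadoop NFS gateway (the read() method of RpcProgramNfs3 and its tests) across several Hadoop releases. Each variant decodes a READ3Request from XDR, performs access checks, reads the requested range from HDFS, and packages the post-op attributes, byte count, EOF flag, and data into a READ3Response.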

  }

  @Override
  public READ3Response read(XDR xdr, SecurityHandler securityHandler,
      InetAddress client) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    final String userName = securityHandler.getUser();
   
    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.getDfsClient(userName);
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(dfsClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId(), e);
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(
          securityHandler.getUid(), securityHandler.getGid(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    // In case there is buffered data for the same file, flush it. This can be
    // optimized later by reading from the cache.
    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret != Nfs3Status.NFS3_OK) {
      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
          + ". Read may not get most recent data.");
    }

    try {
      int rtmax = config.getInt(Nfs3Constant.MAX_READ_TRANSFER_SIZE_KEY,
              Nfs3Constant.MAX_READ_TRANSFER_SIZE_DEFAULT);
      int buffSize = Math.min(rtmax, count);
      byte[] readbuffer = new byte[buffSize];

      int readCount = 0;
      // Retry exactly once, because the cached DFSInputStream can be stale.
      for (int i = 0; i < 2; ++i) {
        FSDataInputStream fis = clientCache.getDfsInputStream(userName,
            Nfs3Utils.getFileIdPath(handle));

        try {
          // Read at most buffSize bytes: the buffer is only buffSize long.
          readCount = fis.read(offset, readbuffer, 0, buffSize);
          break; // Success; no retry needed.
        } catch (IOException e) {
          // TODO: A cleaner way is to throw a new type of exception
          // which requires incompatible changes.
          // Compare messages with equals(), not ==; string identity is not
          // guaranteed for exception messages.
          if (i == 0 && "Stream closed".equals(e.getMessage())) {
            // Drop the stale stream so the retry reopens a fresh one.
            clientCache.invalidateDfsInputStream(userName,
                Nfs3Utils.getFileIdPath(handle));
          } else {
            throw e;
          }
        }
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }
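One detail in the handler above that is easy to misread: eof is set as soon as the read reaches the file size, and on the zero-count access-check path it is computed from the offset alone. A minimal, runnable sketch of that rule (the EofRule class and isEof helper are hypothetical, not part of Hadoop; run with java -ea):

// Hypothetical helper distilling the EOF rule the handlers compute inline.
public class EofRule {
  static boolean isEof(long offset, int bytesRead, long fileSize) {
    // EOF is reported once the read position reaches the file size.
    return offset + bytesRead >= fileSize;
  }

  public static void main(String[] args) {
    long size = 100;
    assert !isEof(0, 50, size);  // mid-file read
    assert isEof(50, 50, size);  // read ends exactly at EOF
    assert isEof(100, 0, size);  // zero-count access check at EOF
    assert !isEof(0, 0, size);   // zero-count access check before EOF
  }
}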

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

  }
 
  @VisibleForTesting
  READ3Response read(XDR xdr, SecurityHandler securityHandler,
      SocketAddress remoteAddress) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    final String userName = securityHandler.getUser();
   
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.getDfsClient(userName);
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(dfsClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId(), e);
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(
          securityHandler.getUid(), securityHandler.getGid(),
          securityHandler.getAuxGids(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    // In case there is buffered data for the same file, flush it. This can be
    // optimized later by reading from the cache.
    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret != Nfs3Status.NFS3_OK) {
      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
          + ". Read may not get most recent data.");
    }

    try {
      int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
          NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
      int buffSize = Math.min(rtmax, count);
      byte[] readbuffer = new byte[buffSize];

      int readCount = 0;
      // Retry exactly once, because the cached DFSInputStream can be stale.
      for (int i = 0; i < 2; ++i) {
        FSDataInputStream fis = clientCache.getDfsInputStream(userName,
            Nfs3Utils.getFileIdPath(handle));

        try {
          // Read at most buffSize bytes: the buffer is only buffSize long.
          readCount = fis.read(offset, readbuffer, 0, buffSize);
          break; // Success; no retry needed.
        } catch (IOException e) {
          // TODO: A cleaner way is to throw a new type of exception
          // which requires incompatible changes.
          // Compare messages with equals(), not ==; string identity is not
          // guaranteed for exception messages.
          if (i == 0 && "Stream closed".equals(e.getMessage())) {
            // Drop the stale stream so the retry reopens a fresh one.
            clientCache.invalidateDfsInputStream(userName,
                Nfs3Utils.getFileIdPath(handle));
          } else {
            throw e;
          }
        }
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }
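The retry loop above deserves a closer look: a cached DFSInputStream can be closed underneath the gateway, and the only signal is an IOException whose message is "Stream closed", in which case the stream is invalidated and the read is attempted once more. Below is the same pattern isolated as a sketch; PositionedReader and StreamCache are hypothetical stand-ins for FSDataInputStream and clientCache:

import java.io.IOException;

// Hypothetical stand-in for the positional-read API of FSDataInputStream.
interface PositionedReader {
  int read(long offset, byte[] buf, int bufOffset, int len) throws IOException;
}

// Hypothetical stand-in for the gateway's per-user stream cache.
interface StreamCache {
  PositionedReader get(String user, String path) throws IOException;
  void invalidate(String user, String path);
}

final class RetryOnStaleStream {
  // Try the cached stream; if it is stale ("Stream closed"), invalidate it
  // and retry exactly once with a freshly opened stream.
  static int read(StreamCache cache, String user, String path,
      long offset, byte[] buf, int len) throws IOException {
    for (int attempt = 0; ; attempt++) {
      PositionedReader in = cache.get(user, path);
      try {
        return in.read(offset, buf, 0, len);
      } catch (IOException e) {
        if (attempt == 0 && "Stream closed".equals(e.getMessage())) {
          cache.invalidate(user, path); // stale handle; reopen on next pass
        } else {
          throw e;
        }
      }
    }
  }
}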

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

      // Readback
      READ3Request readReq = new READ3Request(handle, 0, 10);
      XDR readXdr = new XDR();
      readReq.serialize(readXdr);
      READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
          securityHandler, new InetSocketAddress("localhost", 1234));

      assertTrue(Arrays.equals(buffer, readRsp.getData().array()));

      // Test FILE_SYNC

      // Create file2
      CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
      XDR createXdr2 = new XDR();
      createReq2.serialize(createXdr2);
      CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
          securityHandler, new InetSocketAddress("localhost", 1234));
      FileHandle handle2 = createRsp2.getObjHandle();

      WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
          WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
      XDR writeXdr2 = new XDR();
      writeReq2.serialize(writeXdr2);
      nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));

      waitWrite(nfsd, handle2, 60000);

      // Readback
      READ3Request readReq2 = new READ3Request(handle2, 0, 10);
      XDR readXdr2 = new XDR();
      readReq2.serialize(readXdr2);
      READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
          securityHandler, new InetSocketAddress("localhost", 1234));

      assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
      // FILE_SYNC should sync the file size
      status = client.getFileInfo("/file2");
      assertTrue(status.getLen() == 10);

    } finally {
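A caveat on the readback assertions in this test (and in the similar tests further down): getData() returns the buffer the server allocated, so array() can be longer than the number of bytes actually read; the comparison works here only because the request asks for exactly buffer.length bytes. For partial reads, a safer sketch is to compare just the requested prefix:

      // Hedged sketch: on a partial read the response buffer can be longer
      // than the data read back, so compare only the requested prefix.
      byte[] prefix = Arrays.copyOf(readRsp.getData().array(), buffer.length);
      assertTrue(Arrays.equals(buffer, prefix));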

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

  }

  @Override
  public READ3Response read(XDR xdr, SecurityHandler securityHandler,
      InetAddress client) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
   
    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(superUserClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(
          securityHandler.getUid(), securityHandler.getGid(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    try {
      int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
      byte[] readbuffer = new byte[buffSize];

      DFSInputStream is = dfsClient.open(Nfs3Utils.getFileIdPath(handle));
      FSDataInputStream fis = new FSDataInputStream(is);

      int readCount;
      try {
        readCount = fis.read(offset, readbuffer, 0, buffSize); // at most buffSize bytes fit
      } finally {
        fis.close(); // close even if the read throws, so handles don't leak
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }
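Note the design difference from the first two variants: this older handler opens a fresh DFSInputStream with dfsClient.open() on every READ and closes it immediately, while the newer versions reuse streams through clientCache.getDfsInputStream() and reopen only when a cached stream has gone stale. Caching saves an open round trip per READ, at the cost of the stale-stream retry logic shown earlier.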

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

  }

  @Override
  public READ3Response read(XDR xdr, SecurityHandler securityHandler,
      InetAddress client) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
   
    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.get(securityHandler.getUser());
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

   
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(superUserClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(
          securityHandler.getUid(), securityHandler.getGid(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    try {
      int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
      byte[] readbuffer = new byte[buffSize];

      DFSInputStream is = dfsClient.open(Nfs3Utils.getFileIdPath(handle));
      FSDataInputStream fis = new FSDataInputStream(is);

      int readCount;
      try {
        readCount = fis.read(offset, readbuffer, 0, buffSize); // at most buffSize bytes fit
      } finally {
        fis.close(); // close even if the read throws, so handles don't leak
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

  public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys) {
    return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP);
  }

  public READ3Response read(XDR xdr, RpcAuthSys authSys) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    String uname = authSysCheck(authSys);
    DFSClient dfsClient = clientCache.get(uname);
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

   
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(superUserClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(authSys.getUid(),
          authSys.getGid(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    try {
      int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
      byte[] readbuffer = new byte[buffSize];

      DFSInputStream is = dfsClient.open(Nfs3Utils.getFileIdPath(handle));
      FSDataInputStream fis = new FSDataInputStream(is);

      int readCount;
      try {
        readCount = fis.read(offset, readbuffer, 0, buffSize); // at most buffSize bytes fit
      } finally {
        fis.close(); // close even if the read throws, so handles don't leak
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }
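This is the oldest variant on the page: credentials arrive as a raw RpcAuthSys (AUTH_SYS uid/gid) rather than through a SecurityHandler, there is no export-level checkAccessPrivilege() call, and the access check ignores auxiliary group IDs, which the later SecurityHandler.getAuxGids() variant does take into account.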

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

      // Readback
      READ3Request readReq = new READ3Request(handle, 0, 10);
      XDR readXdr = new XDR();
      readReq.serialize(readXdr);
      READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());

      assertTrue(Arrays.equals(buffer, readRsp.getData().array()));

      // Test FILE_SYNC

      // Create file2
      CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
      XDR createXdr2 = new XDR();
      createReq2.serialize(createXdr2);
      CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());
      FileHandle handle2 = createRsp2.getObjHandle();

      WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
          WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
      XDR writeXdr2 = new XDR();
      writeReq2.serialize(writeXdr2);
      nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
          InetAddress.getLocalHost());

      waitWrite(nfsd, handle2, 60000);

      // Readback
      READ3Request readReq2 = new READ3Request(handle2, 0, 10);
      XDR readXdr2 = new XDR();
      readReq2.serialize(readXdr2);
      READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());

      assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
      // FILE_SYNC should sync the file size
      status = client.getFileInfo("/file2");
      assertTrue(status.getLen() == 10);

    } finally {

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

  }

  @Override
  public READ3Response read(XDR xdr, SecurityHandler securityHandler,
      InetAddress client) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    final String userName = securityHandler.getUser();
   
    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.getDfsClient(userName);
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(superUserClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(
          securityHandler.getUid(), securityHandler.getGid(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    // In case there is buffered data for the same file, flush it. This can be
    // optimized later by reading from the cache.
    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret != Nfs3Status.NFS3_OK) {
      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
          + ". Read may not get most recent data.");
    }

    try {
      int buffSize = Math.min(MAX_READ_TRANSFER_SIZE, count);
      byte[] readbuffer = new byte[buffSize];

      int readCount = 0;
      // Retry exactly once, because the cached DFSInputStream can be stale.
      for (int i = 0; i < 2; ++i) {
        FSDataInputStream fis = clientCache.getDfsInputStream(userName,
            Nfs3Utils.getFileIdPath(handle));

        try {
          // Read at most buffSize bytes: the buffer is only buffSize long.
          readCount = fis.read(offset, readbuffer, 0, buffSize);
          break; // Success; no retry needed.
        } catch (IOException e) {
          // TODO: A cleaner way is to throw a new type of exception
          // which requires incompatible changes.
          // Compare messages with equals(), not ==; string identity is not
          // guaranteed for exception messages.
          if (i == 0 && "Stream closed".equals(e.getMessage())) {
            // Drop the stale stream so the retry reopens a fresh one.
            clientCache.invalidateDfsInputStream(userName,
                Nfs3Utils.getFileIdPath(handle));
          } else {
            throw e;
          }
        }
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

  }

  @Override
  public READ3Response read(XDR xdr, SecurityHandler securityHandler,
      InetAddress client) {
    READ3Response response = new READ3Response(Nfs3Status.NFS3_OK);
    final String userName = securityHandler.getUser();
   
    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.getDfsClient(userName);
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
    READ3Request request = null;

    try {
      request = new READ3Request(xdr);
    } catch (IOException e) {
      LOG.error("Invalid READ request");
      return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
    }

    long offset = request.getOffset();
    int count = request.getCount();

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
          + " count: " + count);
    }

    Nfs3FileAttributes attrs;
    boolean eof;
    if (count == 0) {
      // Only do access check.
      try {
        // Don't read from cache. Client may not have read permission.
        attrs = Nfs3Utils.getFileAttr(dfsClient,
            Nfs3Utils.getFileIdPath(handle), iug);
      } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Get error accessing file, fileId:" + handle.getFileId(), e);
        }
        return new READ3Response(Nfs3Status.NFS3ERR_IO);
      }
      if (attrs == null) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Can't get path for fileId:" + handle.getFileId());
        }
        return new READ3Response(Nfs3Status.NFS3ERR_NOENT);
      }
      int access = Nfs3Utils.getAccessRightsForUserGroup(
          securityHandler.getUid(), securityHandler.getGid(), attrs);
      if ((access & Nfs3Constant.ACCESS3_READ) != 0) {
        eof = offset >= attrs.getSize();
        return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof,
            ByteBuffer.wrap(new byte[0]));
      } else {
        return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
      }
    }
   
    // In case there is buffered data for the same file, flush it. This can be
    // optimized later by reading from the cache.
    int ret = writeManager.commitBeforeRead(dfsClient, handle, offset + count);
    if (ret != Nfs3Status.NFS3_OK) {
      LOG.warn("commitBeforeRead didn't succeed with ret=" + ret
          + ". Read may not get most recent data.");
    }

    try {
      int rtmax = config.getInt(Nfs3Constant.MAX_READ_TRANSFER_SIZE_KEY,
              Nfs3Constant.MAX_READ_TRANSFER_SIZE_DEFAULT);
      int buffSize = Math.min(rtmax, count);
      byte[] readbuffer = new byte[buffSize];

      int readCount = 0;
      // Retry exactly once, because the cached DFSInputStream can be stale.
      for (int i = 0; i < 2; ++i) {
        FSDataInputStream fis = clientCache.getDfsInputStream(userName,
            Nfs3Utils.getFileIdPath(handle));

        try {
          // Read at most buffSize bytes: the buffer is only buffSize long.
          readCount = fis.read(offset, readbuffer, 0, buffSize);
          break; // Success; no retry needed.
        } catch (IOException e) {
          // TODO: A cleaner way is to throw a new type of exception
          // which requires incompatible changes.
          // Compare messages with equals(), not ==; string identity is not
          // guaranteed for exception messages.
          if (i == 0 && "Stream closed".equals(e.getMessage())) {
            // Drop the stale stream so the retry reopens a fresh one.
            clientCache.invalidateDfsInputStream(userName,
                Nfs3Utils.getFileIdPath(handle));
          } else {
            throw e;
          }
        }
      }

      attrs = Nfs3Utils.getFileAttr(dfsClient, Nfs3Utils.getFileIdPath(handle),
          iug);
      if (readCount < count) {
        LOG.info("Partical read. Asked offset:" + offset + " count:" + count
            + " and read back:" + readCount + "file size:" + attrs.getSize());
      }
      // HDFS returns -1 for read beyond file size.
      if (readCount < 0) {
        readCount = 0;
      }
      eof = offset + readCount >= attrs.getSize();
      return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
          ByteBuffer.wrap(readbuffer));

    } catch (IOException e) {
      LOG.warn("Read error: " + e.getClass() + " offset: " + offset
          + " count: " + count, e);
      return new READ3Response(Nfs3Status.NFS3ERR_IO);
    }
  }
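The config-driven variants above size the read buffer with Math.min(rtmax, count), where rtmax comes from Nfs3Constant.MAX_READ_TRANSFER_SIZE_KEY (later NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY). A READ larger than rtmax therefore comes back short, and the NFS client is expected to issue follow-up READs for the remainder, guided by the returned count and eof fields.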

Examples of org.apache.hadoop.nfs.nfs3.response.READ3Response

      // Readback
      READ3Request readReq = new READ3Request(handle, 0, 10);
      XDR readXdr = new XDR();
      readReq.serialize(readXdr);
      READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());

      assertTrue(Arrays.equals(buffer, readRsp.getData().array()));

      // Test FILE_SYNC

      // Create file2
      CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
      XDR createXdr2 = new XDR();
      createReq2.serialize(createXdr2);
      CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());
      FileHandle handle2 = createRsp2.getObjHandle();

      WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
          WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
      XDR writeXdr2 = new XDR();
      writeReq2.serialize(writeXdr2);
      nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
          InetAddress.getLocalHost());

      waitWrite(nfsd, handle2, 60000);

      // Readback
      READ3Request readReq2 = new READ3Request(handle2, 0, 10);
      XDR readXdr2 = new XDR();
      readReq2.serialize(readXdr2);
      READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());

      assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
      // FILE_SYNC should sync the file size
      status = client.getFileInfo("/file2");
      assertTrue(status.getLen() == 10);

    } finally {