Package org.apache.hadoop.oncrpc

Examples of org.apache.hadoop.oncrpc.XDR
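The fragments below come from the Hadoop NFS3 gateway (write handling, MOUNT dispatch) and its tests. They show the two ways XDR is typically used: outgoing requests and responses are serialized into a fresh buffer (request.serialize(xdr), response.writeHeaderAndResponse(new XDR(), xid, verifier)), and incoming RPC payloads are wrapped for decoding (new XDR(data)). As a minimal sketch of the buffer itself, assuming the primitive helpers writeInt/readInt, writeLongAsHyper/readHyper, writeString/readString and getBytes() exposed by the hadoop-nfs XDR class, an encode/decode round trip looks like this:

import org.apache.hadoop.oncrpc.XDR;

public class XdrRoundTrip {
  public static void main(String[] args) {
    // Encode a few primitives in XDR wire format.
    // NOTE: the helper names used here (writeInt, writeLongAsHyper, writeString,
    // getBytes, readInt, readHyper, readString) are assumed from the hadoop-nfs
    // XDR API; the values are purely illustrative.
    XDR out = new XDR();
    out.writeInt(7);              // e.g. an RPC procedure number
    out.writeLongAsHyper(4096L);  // e.g. a file offset
    out.writeString("file1");     // e.g. a file name

    // Decode by wrapping the encoded bytes in a second XDR and reading
    // the fields back in the order they were written.
    XDR in = new XDR(out.getBytes());
    int proc = in.readInt();
    long offset = in.readHyper();
    String name = in.readString();

    System.out.println(proc + " " + offset + " " + name);
  }
}

The first fragment replies to a WRITE3 request: error and repeated-request responses are serialized into a new XDR with writeHeaderAndResponse and sent back on the channel, while genuinely new requests are handed off for processing: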


          + request.getHandle().getFileId());
      WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
          fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
          xid);
    } else {
      // Update the write time first
      updateLastAccessTime();
     
      // Handle repeated write requests (same xid or not).
      // If already replied, send reply again. If not replied, drop the
      // repeated request.
      WriteCtx existantWriteCtx = checkRepeatedWriteRequest(request, channel,
          xid);
      if (existantWriteCtx != null) {
        if (!existantWriteCtx.getReplied()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Repeated write request that hasn't been served: xid="
                + xid + ", drop it.");
          }
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Repeated write request that has already been served: xid="
                + xid + ", resend response.");
          }
          WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
          WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
              fileWcc, request.getCount(), request.getStableHow(),
              Nfs3Constant.WRITE_COMMIT_VERF);
          Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
              new XDR(), xid, new VerifierNone()), xid);
        }
      } else {
        // not a repeated write request
        receivedNewWriteInternal(dfsClient, request, channel, xid,
            asyncDataService, iug);
View Full Code Here
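Finishing a write: the access time is updated and the response, encoded into a new XDR via writeHeaderAndResponse, is flushed to the channel: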


          request.getData().array(),
          Nfs3Utils.getFileIdPath(request.getHandle()), wccData, iug);
    }
    updateLastAccessTime();
    Nfs3Utils.writeChannel(channel,
        response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
        xid);
  }
View Full Code Here
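Sending a successful WRITE3Response, serialized into a new XDR, and marking the WriteCtx as replied: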

        }
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils
            .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
                xid, new VerifierNone()), xid);
        writeCtx.setReplied(true);
      }
    }
  }
View Full Code Here
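Answering a queued commit: the COMMIT3Response is serialized with writeHeaderAndResponse and sent through writeChannelCommit using the commit's original channel and xid: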

      CommitCtx commit = entry.getValue();

      COMMIT3Response response = new COMMIT3Response(status, wccData,
          Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannelCommit(commit.getChannel(), response
          .writeHeaderAndResponse(new XDR(), commit.getXid(),
              new VerifierNone()), commit.getXid());
     
      if (LOG.isDebugEnabled()) {
        LOG.debug("FileId: " + latestAttr.getFileid() + " Service time:"
            + (System.currentTimeMillis() - commit.getStartTime())
View Full Code Here
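Acknowledging a completed write and then processing waiting commits; if the write fails with an IOException, an NFS3ERR_IO response is encoded into a new XDR and sent instead: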

          count = writeCtx.getOriginalCount();
        }
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
            new XDR(), xid, new VerifierNone()), xid);
      }
     
      // Handle the waiting commits without holding any lock
      processCommits(writeCtx.getOffset() + writeCtx.getCount());
    
    } catch (IOException e) {
      LOG.error("Error writing to fileId " + handle.getFileId() + " at offset "
          + offset + " and length " + count, e);
      if (!writeCtx.getReplied()) {
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
            new XDR(), xid, new VerifierNone()), xid);
        // Keep stream open. Either the client retries or StreamMonitor closes it.
      }

      LOG.info("Clean up open file context for fileId: "
          + latestAttr.getFileid());
View Full Code Here
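Failing any write that has not yet been replied to during cleanup, using the channel and xid stored in the WriteCtx: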

      if (!writeCtx.getReplied()) {
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
            fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(writeCtx.getChannel(), response
            .writeHeaderAndResponse(new XDR(), writeCtx.getXid(),
                new VerifierNone()), writeCtx.getXid());
      }
    }
   
    // Cleanup dump file
View Full Code Here
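Dispatching MOUNT procedures: the incoming payload is wrapped in an XDR for decoding, each handler writes its reply into an output XDR, and the result is wrapped into a ChannelBuffer and sent as the RPC response: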

    RpcCall rpcCall = (RpcCall) info.header();
    final MNTPROC mntproc = MNTPROC.fromValue(rpcCall.getProcedure());
    int xid = rpcCall.getXid();
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);
    XDR xdr = new XDR(data);
    XDR out = new XDR();
    InetAddress client = ((InetSocketAddress) info.remoteAddress()).getAddress();

    if (mntproc == MNTPROC.NULL) {
      out = nullOp(out, xid, client);
    } else if (mntproc == MNTPROC.MNT) {
      out = mnt(xdr, out, xid, client);
    } else if (mntproc == MNTPROC.DUMP) {
      out = dump(out, xid, client);
    } else if (mntproc == MNTPROC.UMNT) {     
      out = umnt(xdr, out, xid, client);
    } else if (mntproc == MNTPROC.UMNTALL) {
      umntall(out, xid, client);
    } else if (mntproc == MNTPROC.EXPORT) {
      // Currently only one NFS export is supported
      List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
      hostsMatchers.add(hostsMatcher);
      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
    } else {
      // Invalid procedure
      RpcAcceptedReply.getInstance(xid,
          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
          out);
    }
    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
    RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
    RpcUtil.sendRpcResponse(ctx, rsp);
  }
View Full Code Here
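A test that serializes CREATE3, WRITE3, and READ3 requests into XDR buffers and passes their read-only wraps to the NFS3 handlers, covering both DATA_SYNC and FILE_SYNC writes: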

      HdfsFileStatus status = client.getFileInfo("/");
      FileHandle rootHandle = new FileHandle(status.getFileId());
      // Create file1
      CREATE3Request createReq = new CREATE3Request(rootHandle, "file1",
          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
      XDR createXdr = new XDR();
      createReq.serialize(createXdr);
      CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());
      FileHandle handle = createRsp.getObjHandle();

      // Test DATA_SYNC
      byte[] buffer = new byte[10];
      for (int i = 0; i < 10; i++) {
        buffer[i] = (byte) i;
      }
      WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
          WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          InetAddress.getLocalHost());

      waitWrite(nfsd, handle, 60000);

      // Readback
      READ3Request readReq = new READ3Request(handle, 0, 10);
      XDR readXdr = new XDR();
      readReq.serialize(readXdr);
      READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());

      assertTrue(Arrays.equals(buffer, readRsp.getData().array()));

      // Test FILE_SYNC

      // Create file2
      CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2",
          Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
      XDR createXdr2 = new XDR();
      createReq2.serialize(createXdr2);
      CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());
      FileHandle handle2 = createRsp2.getObjHandle();

      WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10,
          WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
      XDR writeXdr2 = new XDR();
      writeReq2.serialize(writeXdr2);
      nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler,
          InetAddress.getLocalHost());

      waitWrite(nfsd, handle2, 60000);

      // Readback
      READ3Request readReq2 = new READ3Request(handle2, 0, 10);
      XDR readXdr2 = new XDR();
      readReq2.serialize(readXdr2);
      READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(),
          securityHandler, InetAddress.getLocalHost());

      assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
      // FILE_SYNC should sync the file size
      status = client.getFileInfo("/file2");
View Full Code Here
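Validating a WRITE3 request and opening an append stream for a file handle that has no cached OpenFileCtx; every error path builds a WRITE3Response and serializes it into a new XDR before replying: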

    int count = request.getCount();
    byte[] data = request.getData().array();
    if (data.length < count) {
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
      Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
          new XDR(), xid, new VerifierNone()), xid);
      return;
    }

    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
      LOG.debug("handleWrite " + request);
    }

    // Check if there is a stream to write
    FileHandle fileHandle = request.getHandle();
    OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
    if (openFileCtx == null) {
      LOG.info("No opened stream for fileId:" + fileHandle.getFileId());

      String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
      HdfsDataOutputStream fos = null;
      Nfs3FileAttributes latestAttr = null;
      try {
        int bufferSize = config.getInt(
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
            CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
       
        fos = dfsClient.append(fileIdPath, bufferSize, null, null);

        latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
      } catch (RemoteException e) {
        IOException io = e.unwrapRemoteException();
        if (io instanceof AlreadyBeingCreatedException) {
          LOG.warn("Can't append file:" + fileIdPath
              + ". Possibly the file is being closed. Drop the request:"
              + request + ", wait for the client to retry...");
          return;
        }
        throw e;
      } catch (IOException e) {
        LOG.error("Can't append to file:" + fileIdPath + ", error:" + e);
        if (fos != null) {
          fos.close();
        }
        WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr),
            preOpAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
            fileWcc, count, request.getStableHow(),
            Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
            new XDR(), xid, new VerifierNone()), xid);
        return;
      }

      // Add open stream
      String writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
          Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
      openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
          + fileHandle.getFileId(), dfsClient, iug);

      if (!addOpenFileStream(fileHandle, openFileCtx)) {
        LOG.info("Can't add new stream. Close it. Tell client to retry.");
        try {
          fos.close();
        } catch (IOException e) {
          LOG.error("Can't close stream for fileId:" + handle.getFileId());
        }
        // Notify client to retry
        WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX,
            fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils.writeChannel(channel,
            response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
            xid);
        return;
      }

      if (LOG.isDebugEnabled()) {
View Full Code Here
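Completing a commit by serializing the COMMIT3Response into an XDR and sending it with writeChannelCommit: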

    }
    WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
    COMMIT3Response response = new COMMIT3Response(status, fileWcc,
        Nfs3Constant.WRITE_COMMIT_VERF);
    Nfs3Utils.writeChannelCommit(channel,
        response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
        xid);
  }
View Full Code Here
