Examples of XDR


Examples of org.apache.hadoop.oncrpc.XDR

    RpcCall rpcCall = (RpcCall) info.header();
    final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());
    int xid = rpcCall.getXid();
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);
    XDR xdr = new XDR(data);
    XDR out = new XDR();
    InetAddress client = ((InetSocketAddress) info.remoteAddress())
        .getAddress();
    Channel channel = info.channel();

    Credentials credentials = rpcCall.getCredential();
    // Ignore auth only for NFSPROC3_NULL, especially for Linux clients.
    if (nfsproc3 != NFSPROC3.NULL) {
      if (credentials.getFlavor() != AuthFlavor.AUTH_SYS
          && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
        LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor()
            + " is not AUTH_SYS or RPCSEC_GSS.");
        XDR reply = new XDR();
        RpcDeniedReply rdr = new RpcDeniedReply(xid,
            RpcReply.ReplyState.MSG_ACCEPTED,
            RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
        rdr.write(reply);

        ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap()
            .buffer());
        RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
        RpcUtil.sendRpcResponse(ctx, rsp);
        return;
      }

Examples of org.apache.hadoop.oncrpc.XDR

 
  @Test
  public void testSerializeDeserialize() {
    // Serialize NfsTime
    NfsTime t1 = new NfsTime(1001);
    XDR xdr = new XDR();
    t1.serialize(xdr);
   
    // Deserialize it back
    NfsTime t2 = NfsTime.deserialize(xdr.asReadOnlyWrap());
   
    // Ensure the NfsTimes are equal
    Assert.assertEquals(t1, t2);
  }
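As a companion to the NfsTime round trip above, here is a minimal sketch of the same pattern with raw primitives. It is not taken from the Hadoop sources, and it assumes XDR#readInt() and XDR#readHyper() as the read-side counterparts of writeInt() and writeLongAsHyper().

    // Hypothetical sketch: write primitives, then read them back from a read-only wrap.
    XDR xdr = new XDR();
    xdr.writeInt(42);
    xdr.writeLongAsHyper(1001L);

    XDR in = xdr.asReadOnlyWrap();
    Assert.assertEquals(42, in.readInt());        // assumed counterpart of writeInt()
    Assert.assertEquals(1001L, in.readHyper());   // assumed counterpart of writeLongAsHyper()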

Examples of org.apache.hadoop.oncrpc.XDR

public class TestFileHandle {
  @Test
  public void testConstructor() {
    FileHandle handle = new FileHandle(1024);
    XDR xdr = new XDR();
    handle.serialize(xdr);
    Assert.assertEquals(handle.getFileId(), 1024);

    // Deserialize it back
    FileHandle handle2 = new FileHandle();
    handle2.deserialize(xdr.asReadOnlyWrap());
    Assert.assertEquals("Failed: Assert 1024 is id ", 1024,
            handle.getFileId());
  }
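A variant of the same round trip, continuing from the xdr populated in the test above and using only calls that already appear in these examples (XDR#getBytes() and the XDR(byte[]) constructor) instead of asReadOnlyWrap(). This is a hypothetical sketch, not part of the original test.

    // Hypothetical variant: deserialize from the raw bytes rather than a read-only wrap.
    byte[] raw = xdr.getBytes();
    FileHandle handle3 = new FileHandle();
    handle3.deserialize(new XDR(raw));
    Assert.assertEquals(1024, handle3.getFileId());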

Examples of org.apache.hadoop.oncrpc.XDR

    RpcCall rpcCall = (RpcCall) info.header();
    final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure());   
    int xid = rpcCall.getXid();
    byte[] data = new byte[info.data().readableBytes()];
    info.data().readBytes(data);
    XDR xdr = new XDR(data);
    XDR out = new XDR();
    InetAddress client = ((InetSocketAddress) info.remoteAddress())
        .getAddress();
    Credentials credentials = rpcCall.getCredential();
   
    // Ignore auth only for NFSPROC3_NULL, especially for Linux clients.
    if (nfsproc3 != NFSPROC3.NULL) {
      if (credentials.getFlavor() != AuthFlavor.AUTH_SYS
          && credentials.getFlavor() != AuthFlavor.RPCSEC_GSS) {
        LOG.info("Wrong RPC AUTH flavor, " + credentials.getFlavor()
            + " is not AUTH_SYS or RPCSEC_GSS.");
        XDR reply = new XDR();
        RpcDeniedReply rdr = new RpcDeniedReply(xid,
            RpcReply.ReplyState.MSG_ACCEPTED,
            RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
        rdr.write(reply);

        ChannelBuffer buf = ChannelBuffers.wrappedBuffer(reply.asReadOnlyWrap()
            .buffer());
        RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
        RpcUtil.sendRpcResponse(ctx, rsp);
        return;
      }

Examples of org.apache.hadoop.oncrpc.XDR

    }
  }

  @Test(timeout = 1000)
  public void testRegistration() throws IOException, InterruptedException {
    XDR req = new XDR();
    RpcCall.getInstance(++xid, RpcProgramPortmap.PROGRAM,
        RpcProgramPortmap.VERSION,
        RpcProgramPortmap.PMAPPROC_SET,
        new CredentialsNone(), new VerifierNone()).write(req);

    PortmapMapping sent = new PortmapMapping(90000, 1,
        PortmapMapping.TRANSPORT_TCP, 1234);
    sent.serialize(req);

    byte[] reqBuf = req.getBytes();
    DatagramSocket s = new DatagramSocket();
    DatagramPacket p = new DatagramPacket(reqBuf, reqBuf.length,
        pm.getUdpServerLoAddress());
    try {
      s.send(p);
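If the test also needs to inspect the portmap reply, one option is to receive the datagram and wrap it back into an XDR; an ONC RPC reply begins with the xid, so it should echo the request. This is a hypothetical continuation, not part of the snippet above: it assumes XDR#readInt(), assumes java.util.Arrays is imported, and the buffer size and socket timeout are arbitrary.

    // Hypothetical continuation: receive the reply and sanity-check the echoed xid.
    byte[] respBuf = new byte[512];                    // arbitrary buffer size
    DatagramPacket resp = new DatagramPacket(respBuf, respBuf.length);
    s.setSoTimeout(500);                               // stay within the test's 1s timeout
    s.receive(resp);

    XDR reply = new XDR(Arrays.copyOf(resp.getData(), resp.getLength()));
    Assert.assertEquals(xid, reply.readInt());         // RPC replies echo the request xid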

Examples of org.apache.hadoop.oncrpc.XDR

    // Get inodeId of /tmp
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();

    // Create related part of the XDR request
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    xdr_req.writeLongAsHyper(0); // cookie
    xdr_req.writeLongAsHyper(0); // verifier
    xdr_req.writeInt(100); // count

    READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    List<Entry3> dirents = response.getDirList().getEntries();
    assertTrue(dirents.size() == 5); // including dot and dotdot

    // Test start listing from f2
    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
    long f2Id = status.getFileId();

    // Create related part of the XDR request
    xdr_req = new XDR();
    handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    xdr_req.writeLongAsHyper(f2Id); // cookie
    xdr_req.writeLongAsHyper(0); // verifier
    xdr_req.writeInt(100); // count

    response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
        new InetSocketAddress("localhost", 1234));
    dirents = response.getDirList().getEntries();
    assertTrue(dirents.size() == 1);
    Entry3 entry = dirents.get(0);
    assertTrue(entry.getName().equals("f3"));

    // When the cookie entry is deleted, the listing starts over, without dot and dotdot
    hdfs.delete(new Path(testdir + "/f2"), false);

    response = nfsd.readdir(xdr_req.asReadOnlyWrap(), securityHandler,
        new InetSocketAddress("localhost", 1234));
    dirents = response.getDirList().getEntries();
    assertTrue(dirents.size() == 2); // No dot, dotdot
  }

Examples of org.apache.hadoop.oncrpc.XDR

    // Get inodeId of /tmp
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
   
    // Create related part of the XDR request
    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    xdr_req.writeLongAsHyper(0); // cookie
    xdr_req.writeLongAsHyper(0); // verifier
    xdr_req.writeInt(100); // dirCount
    xdr_req.writeInt(1000); // maxCount

    READDIRPLUS3Response responsePlus = nfsd.readdirplus(xdr_req
        .asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost",
        1234));
    List<EntryPlus3> direntPlus = responsePlus.getDirListPlus().getEntries();
    assertTrue(direntPlus.size() == 5); // including dot, dotdot

    // Test start listing from f2
    status = nn.getRpcServer().getFileInfo(testdir + "/f2");
    long f2Id = status.getFileId();

    // Create related part of the XDR request
    xdr_req = new XDR();
    handle = new FileHandle(dirId);
    handle.serialize(xdr_req);
    xdr_req.writeLongAsHyper(f2Id); // cookie
    xdr_req.writeLongAsHyper(0); // verifier
    xdr_req.writeInt(100); // dirCount
    xdr_req.writeInt(1000); // maxCount

    responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
        new InetSocketAddress("localhost", 1234));
    direntPlus = responsePlus.getDirListPlus().getEntries();
    assertTrue(direntPlus.size() == 1);
    EntryPlus3 entryPlus = direntPlus.get(0);
    assertTrue(entryPlus.getName().equals("f3"));

    // When the cookie entry is deleted, the listing starts over, without dot and dotdot
    hdfs.delete(new Path(testdir + "/f2"), false);

    responsePlus = nfsd.readdirplus(xdr_req.asReadOnlyWrap(), securityHandler,
        new InetSocketAddress("localhost", 1234));
    direntPlus = responsePlus.getDirListPlus().getEntries();
    assertTrue(direntPlus.size() == 2); // No dot, dotdot
  }

Examples of org.apache.hadoop.oncrpc.XDR

          + request.getHandle().getFileId());
      WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
          fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
          xid);
    } else {
      // Update the write time first
      updateLastAccessTime();
     
      // Handle repeated write requests (same xid or not).
      // If already replied, send reply again. If not replied, drop the
      // repeated request.
      WriteCtx existantWriteCtx = checkRepeatedWriteRequest(request, channel,
          xid);
      if (existantWriteCtx != null) {
        if (!existantWriteCtx.getReplied()) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Repeated write request which hasn't be served: xid="
                + xid + ", drop it.");
          }
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Repeated write request which is already served: xid="
                + xid + ", resend response.");
          }
          WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
          WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
              fileWcc, request.getCount(), request.getStableHow(),
              Nfs3Constant.WRITE_COMMIT_VERF);
          Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
              new XDR(), xid, new VerifierNone()), xid);
        }
      } else {
        // not a repeated write request
        receivedNewWriteInternal(dfsClient, request, channel, xid,
            asyncDataService, iug);

Examples of org.apache.hadoop.oncrpc.XDR

          request.getData().array(),
          Nfs3Utils.getFileIdPath(request.getHandle()), wccData, iug);
    }
    updateLastAccessTime();
    Nfs3Utils.writeChannel(channel,
        response.writeHeaderAndResponse(new XDR(), xid, new VerifierNone()),
        xid);
  }

Examples of org.apache.hadoop.oncrpc.XDR

        }
        WccData fileWcc = new WccData(preOpAttr, latestAttr);
        WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
            fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
        Nfs3Utils
            .writeChannel(channel, response.writeHeaderAndResponse(new XDR(),
                xid, new VerifierNone()), xid);
        writeCtx.setReplied(true);
      }
    }
  }