Examples of NamenodeProtocols


Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      final LengthParam length,
      final RenewerParam renewer,
      final BufferSizeParam bufferSize
      ) throws IOException, URISyntaxException {
    final NameNode namenode = (NameNode)context.getAttribute("name.node");
    final NamenodeProtocols np = namenode.getRpcServer();

    switch(op.getValue()) {
    case OPEN:
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), offset.getValue(), -1L, offset, length, bufferSize);
      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GET_BLOCK_LOCATIONS:
    {
      final long offsetValue = offset.getValue();
      final Long lengthValue = length.getValue();
      final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
          offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
      final String js = JsonUtil.toJsonString(locatedblocks);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETFILESTATUS:
    {
      final HdfsFileStatus status = np.getFileInfo(fullpath);
      if (status == null) {
        throw new FileNotFoundException("File does not exist: " + fullpath);
      }

      final String js = JsonUtil.toJsonString(status, true);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case LISTSTATUS:
    {
      final StreamingOutput streaming = getListingStream(np, fullpath);
      return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
    }
    case GETCONTENTSUMMARY:
    {
      final ContentSummary contentsummary = np.getContentSummary(fullpath);
      final String js = JsonUtil.toJsonString(contentsummary);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETFILECHECKSUM:
    {
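
The GET_BLOCK_LOCATIONS branch above is a thin wrapper over ClientProtocol#getBlockLocations. A minimal self-contained sketch of the same RPC against a MiniDFSCluster (the path and sizes here are illustrative, not from the original source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class GetBlockLocationsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      // Write a small file so the namenode has a block to report.
      FileSystem fs = cluster.getFileSystem();
      FSDataOutputStream out = fs.create(new Path("/sketch-file"));
      out.write(new byte[1024]);
      out.close();

      // The same RPC the GET_BLOCK_LOCATIONS branch issues above.
      NamenodeProtocols np = cluster.getNameNodeRpc();
      LocatedBlocks blocks =
          np.getBlockLocations("/sketch-file", 0L, Long.MAX_VALUE);
      System.out.println("located blocks: " + blocks.locatedBlockCount());
    } finally {
      cluster.shutdown();
    }
  }
}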

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      cluster.shutdown();
  }

  @Test
  public void testDelegationToken() throws IOException, InterruptedException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    HttpServletRequest request = mock(HttpServletRequest.class);
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
    String tokenString = NamenodeJspHelper.getDelegationToken(nn, request,
        conf, ugi);
    // tokenString returned must be null because security is disabled
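
NamenodeJspHelper.getDelegationToken delegates to ClientProtocol#getDelegationToken on the same RPC interface. A sketch of the direct call (the renewer name is illustrative; with security disabled the namenode returns null, which is what the test asserts):

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

class DelegationTokenSketch {
  // Assumes a NamenodeProtocols handle obtained as in the test above.
  static void printToken(NamenodeProtocols nn) throws Exception {
    Token<DelegationTokenIdentifier> token =
        nn.getDelegationToken(new Text("renewer"));
    // With security disabled the namenode issues no token.
    System.out.println(token == null
        ? "security disabled: no token issued"
        : token.encodeToUrlString());
  }
}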

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    Path file = new Path("/test-file");   
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
   
    try {
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      DFSOutputStream out = null;
      try {
        // Create a file and make sure a block is allocated for it.
        out = (DFSOutputStream)(fs.create(file).
            getWrappedStream());
        out.write(1);
        out.hflush();
       
        // Create a snapshot that includes the file.
        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
            new Path("/"), "s1");
       
        // Grab the block info of this file for later use.
        FSDataInputStream in = null;
        ExtendedBlock oldBlock = null;
        try {
          in = fs.open(file);
          oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
        } finally {
          IOUtils.closeStream(in);
        }
       
        // Allocate a new block ID/gen stamp so we can simulate pipeline
        // recovery.
        String clientName = ((DistributedFileSystem)fs).getClient()
            .getClientName();
        LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(
            oldBlock, clientName);
        ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
            oldBlock.getBlockId(), oldBlock.getNumBytes(),
            newLocatedBlock.getBlock().getGenerationStamp());

        // Delete the file from the present FS. It will still exist in the
        // previously-created snapshot. This will log an OP_DELETE for the
        // file in question.
        fs.delete(file, true);
       
        // Simulate a pipeline recovery, wherein a new block is allocated
        // for the existing block, resulting in an OP_UPDATE_BLOCKS being
        // logged for the file in question.
        try {
          namenode.updatePipeline(clientName, oldBlock, newBlock,
              newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
        } catch (IOException ioe) {
          // expected: the file no longer exists, so the pipeline update must fail
          assertExceptionContains(
              "does not exist or it is not under construction", ioe);
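
For contrast, when the file is still open and under construction the same two RPCs form the normal recovery handshake. A sketch reusing the names from the test above (assumption: the file has not been deleted):

// 1) Ask the namenode for a new generation stamp for the last block.
LocatedBlock recovery = namenode.updateBlockForPipeline(oldBlock, clientName);
// 2) Commit the updated pipeline under the new generation stamp.
ExtendedBlock updated = new ExtendedBlock(oldBlock.getBlockPoolId(),
    oldBlock.getBlockId(), oldBlock.getNumBytes(),
    recovery.getBlock().getGenerationStamp());
namenode.updatePipeline(clientName, oldBlock, updated,
    recovery.getLocations(), recovery.getStorageIDs());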

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
      assertEquals(++inodeCount, fsn.dir.getInodeMapSize());

      // Create a file
      // Last inode ID and inode map size should increase by 1
      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
      DFSTestUtil.createFile(fs, new Path("/test1/file"), 1024, (short) 1, 0);
      assertEquals(++expectedLastInodeId, fsn.getLastInodeId());
      assertEquals(++inodeCount, fsn.dir.getInodeMapSize());
     
      // Ensure right inode ID is returned in file status
      HdfsFileStatus fileStatus = nnrpc.getFileInfo("/test1/file");
      assertEquals(expectedLastInodeId, fileStatus.getFileId());

      // Rename a directory
      // Last inode ID and inode map size should not change
      Path renamedPath = new Path("/test2");
      assertTrue(fs.rename(path, renamedPath));
      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
     
      // Delete test2/file and test2 and ensure inode map size decreases
      assertTrue(fs.delete(renamedPath, true));
      inodeCount -= 2;
      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
     
      // Create /test1/file1 and /test1/file2, then concat them
      String file1 = "/test1/file1";
      String file2 = "/test1/file2";
      DFSTestUtil.createFile(fs, new Path(file1), 512, (short) 1, 0);
      DFSTestUtil.createFile(fs, new Path(file2), 512, (short) 1, 0);
      inodeCount += 3; // test1, file1 and file2 are created
      expectedLastInodeId += 3;
      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
      // Concat /test1/file1 into /test1/file2
      nnrpc.concat(file2, new String[] {file1});
      inodeCount--; // file1's inode is removed by the concat
      assertEquals(inodeCount, fsn.dir.getInodeMapSize());
      assertEquals(expectedLastInodeId, fsn.getLastInodeId());
      assertTrue(fs.delete(new Path("/test1"), true));
      inodeCount -= 2; // test1 and file2 are deleted
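
The concat RPC used above is also exposed through the client API. A sketch of the equivalent call via DistributedFileSystem (paths reused from the test; the sources must exist and share block size and replication):

// Equivalent client-side entry point for the concat RPC above.
DistributedFileSystem dfs = cluster.getFileSystem();
dfs.concat(new Path("/test1/file2"), new Path[] { new Path("/test1/file1") });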

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
     
      // FileSystem#mkdirs "/testInodeIdBasedPaths"
      Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
      Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
      fs.mkdirs(baseDir);
      assertTrue(fs.exists(baseDir));
      long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
     
      // FileSystem#create file and FileSystem#close
      Path testFileInodePath = getInodePath(baseDirFileId, "test1");
      Path testFileRegularPath = new Path(baseDir, "test1");
      final int testFileBlockSize = 1024;
      FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
      assertTrue(fs.exists(testFileInodePath));
     
      // FileSystem#setPermission
      FsPermission perm = new FsPermission((short)0666);
      fs.setPermission(testFileInodePath, perm);
     
      // FileSystem#getFileStatus and FileSystem#getPermission
      FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
      assertEquals(perm, fileStatus.getPermission());
     
      // FileSystem#setOwner
      fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
     
      // FileSystem#setTimes
      fs.setTimes(testFileInodePath, 0, 0);
      fileStatus = fs.getFileStatus(testFileInodePath);
      assertEquals(0, fileStatus.getModificationTime());
      assertEquals(0, fileStatus.getAccessTime());
     
      // FileSystem#setReplication
      fs.setReplication(testFileInodePath, (short)3);
      fileStatus = fs.getFileStatus(testFileInodePath);
      assertEquals(3, fileStatus.getReplication());
      fs.setReplication(testFileInodePath, (short)1);
     
      // ClientProtocol#getPreferredBlockSize
      assertEquals(testFileBlockSize,
          nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
     
      // symbolic link related tests
     
      // Reserved path is not allowed as a target
      String invalidTarget = new Path(baseDir, "invalidTarget").toString();
      String link = new Path(baseDir, "link").toString();
      testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
     
      // Test creating a link using reserved inode path
      String validTarget = "/validtarget";
      testValidSymlinkTarget(nnRpc, validTarget, link);
     
      // FileSystem#append
      fs.append(testFileInodePath);
      // DistributedFileSystem#recoverLease
      fs.recoverLease(testFileInodePath);
     
      // Namenode#getBlockLocations
      LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(),
          0, Long.MAX_VALUE);
      LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(),
          0, Long.MAX_VALUE);
      checkEquals(l1, l2);
     
      // FileSystem#rename - both the variants
      Path renameDst = getInodePath(baseDirFileId, "test2");
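
getInodePath is a test helper, presumably building paths under HDFS's reserved inode namespace. A sketch of the equivalent construction (the /.reserved/.inodes prefix is the reserved-path scheme for addressing files by inode ID):

// Sketch of the helper used above: address a file by inode ID.
static Path getInodePath(long inodeId, String remainder) {
  return new Path("/.reserved/.inodes/" + inodeId + "/" + remainder);
}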

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    // create an uncompressed image
    LOG.info("Create an uncompressed fsimage");
    NameNode namenode = new NameNode(conf);
    namenode.getNamesystem().mkdirs("/test",
        new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace();
    namenode.stop();
    namenode.join();

    // compress image using default codec
    LOG.info("Read an uncomressed image and store it compressed using default codec.");
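
The follow-on step presumably enables image compression before restarting the namenode. A sketch of the relevant configuration, using keys from DFSConfigKeys:

// Enable fsimage compression with the default codec before the restart.
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
    "org.apache.hadoop.io.compress.DefaultCodec");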

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    checkNameSpace(conf);
  }

  private void checkNameSpace(Configuration conf) throws IOException {
    NameNode namenode = new NameNode(conf);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace();
    namenode.stop();
    namenode.join();
  }
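
saveNamespace() is only permitted while the namenode is in safe mode, which is why the helper enters it first. A sketch of the complete enter/save/leave sequence:

// saveNamespace() would be rejected if the namenode were not in safe mode.
nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
nnRpc.saveNamespace();
nnRpc.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);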

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      cluster = new MiniDFSCluster.Builder(config)
      .numDataNodes(numDatanodes).setupHostsFile(true).build();
      cluster.waitActive();
 
      cluster.restartNameNode();
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      assertNotNull(nn);
      assertTrue(cluster.isDataNodeUp());
     
      DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      for (int i = 0 ; i < 5 && info.length != numDatanodes; i++) {
        Thread.sleep(HEARTBEAT_INTERVAL * 1000);
        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      }
      assertEquals("Number of live nodes should be "+numDatanodes, numDatanodes,
          info.length);
     
    } catch (IOException e) {
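
The same RPC can report other node sets; a short sketch (DatanodeReportType also offers DEAD and ALL):

// Other views of the cluster via the same report RPC.
DatanodeInfo[] dead = nn.getDatanodeReport(DatanodeReportType.DEAD);
DatanodeInfo[] all = nn.getDatanodeReport(DatanodeReportType.ALL);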

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    REMOTE_ADDRESS.set(null);
  }
 
  private static NamenodeProtocols getRPCServer(NameNode namenode)
      throws IOException {
    final NamenodeProtocols np = namenode.getRpcServer();
    if (np == null) {
      throw new IOException("Namenode is in startup mode");
    }
    return np;
  }
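
A sketch of a typical call site for the helper above (the path is illustrative):

// Resolve the RPC server, then issue an ordinary ClientProtocol call.
final NamenodeProtocols np = getRPCServer(namenode);
final HdfsFileStatus status = np.getFileInfo("/some/path");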

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      //choose a datanode containing a replica
      final NamenodeProtocols np = getRPCServer(namenode);
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException("Offset=" + openOffset
              + " out of the range [0, " + len + "); " + op + ", path=" + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return bestNode(locations.get(0).getLocations());
        }
      }
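
bestNode(...) is a selection helper elsewhere in this class. A simplified stand-in that just takes the first reported replica (assumption: the real helper applies additional selection criteria such as excluding unhealthy nodes):

// Simplified stand-in for bestNode(...): take the first reported replica.
static DatanodeInfo firstNode(DatanodeInfo[] nodes) throws IOException {
  if (nodes == null || nodes.length == 0) {
    throw new IOException("No replica locations available");
  }
  return nodes[0];
}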