Examples of NamenodeProtocols


Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      final OldSnapshotNameParam oldSnapshotName
      ) throws IOException, URISyntaxException {

    final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
    final NameNode namenode = (NameNode)context.getAttribute("name.node");
    final NamenodeProtocols np = getRPCServer(namenode);

    switch(op.getValue()) {
    case CREATE:
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), -1L, blockSize.getValue(conf),
          permission, overwrite, bufferSize, replication, blockSize);
      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case MKDIRS:
    {
      final boolean b = np.mkdirs(fullpath, permission.getFsPermission(), true);
      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case CREATESYMLINK:
    {
      np.createSymlink(destination.getValue(), fullpath,
          PermissionParam.getDefaultFsPermission(), createParent.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case RENAME:
    {
      final EnumSet<Options.Rename> s = renameOptions.getValue();
      if (s.isEmpty()) {
        final boolean b = np.rename(fullpath, destination.getValue());
        final String js = JsonUtil.toJsonString("boolean", b);
        return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
      } else {
        np.rename2(fullpath, destination.getValue(),
            s.toArray(new Options.Rename[s.size()]));
        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
      }
    }
    case SETREPLICATION:
    {
      final boolean b = np.setReplication(fullpath, replication.getValue(conf));
      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case SETOWNER:
    {
      if (owner.getValue() == null && group.getValue() == null) {
        throw new IllegalArgumentException("Both owner and group are empty.");
      }

      np.setOwner(fullpath, owner.getValue(), group.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETPERMISSION:
    {
      np.setPermission(fullpath, permission.getFsPermission());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETTIMES:
    {
      np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case RENEWDELEGATIONTOKEN:
    {
      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
      token.decodeFromUrlString(delegationTokenArgument.getValue());
      final long expiryTime = np.renewDelegationToken(token);
      final String js = JsonUtil.toJsonString("long", expiryTime);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case CANCELDELEGATIONTOKEN:
    {
      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
      token.decodeFromUrlString(delegationTokenArgument.getValue());
      np.cancelDelegationToken(token);
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case MODIFYACLENTRIES: {
      np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case REMOVEACLENTRIES: {
      np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case REMOVEDEFAULTACL: {
      np.removeDefaultAcl(fullpath);
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case REMOVEACL: {
      np.removeAcl(fullpath);
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETACL: {
      np.setAcl(fullpath, aclPermission.getAclPermission(true));
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETXATTR: {
      np.setXAttr(
          fullpath,
          XAttrHelper.buildXAttr(xattrName.getXAttrName(),
              xattrValue.getXAttrValue()), xattrSetFlag.getFlag());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case REMOVEXATTR: {
      np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case CREATESNAPSHOT: {
      String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
      final String js = JsonUtil.toJsonString(
          org.apache.hadoop.fs.Path.class.getSimpleName(), snapshotPath);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case RENAMESNAPSHOT: {
      np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
          snapshotName.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
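
The excerpt above appears to be the PUT dispatcher from WebHDFS's NamenodeWebHdfsMethods: each HTTP PUT operation is mapped onto a NamenodeProtocols RPC, with boolean and numeric results wrapped as JSON and void operations returning an empty response. As a minimal sketch (not from this page), the same mutation RPCs can be driven directly against a MiniDFSCluster; the paths, owner, and group below are illustrative:

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

    static void putOpsSketch(MiniDFSCluster cluster) throws Exception {
      final NamenodeProtocols np = cluster.getNameNodeRpc();

      // MKDIRS: createParent=true builds missing ancestors
      boolean made = np.mkdirs("/user/alice/data",
          new FsPermission((short) 0755), true);

      // RENAME: the two-argument form returns a boolean instead of throwing
      boolean renamed = np.rename("/user/alice/data", "/user/alice/archive");

      // SETOWNER / SETPERMISSION / SETTIMES mirror the handler cases above;
      // an access time of -1 leaves the stored atime unchanged
      np.setOwner("/user/alice/archive", "alice", "staff");
      np.setPermission("/user/alice/archive", new FsPermission((short) 0700));
      np.setTimes("/user/alice/archive", System.currentTimeMillis(), -1L);
    }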

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      final BufferSizeParam bufferSize,
      final List<XAttrNameParam> xattrNames,
      final XAttrEncodingParam xattrEncoding
      ) throws IOException, URISyntaxException {
    final NameNode namenode = (NameNode)context.getAttribute("name.node");
    final NamenodeProtocols np = getRPCServer(namenode);

    switch(op.getValue()) {
    case OPEN:
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), offset.getValue(), -1L, offset, length, bufferSize);
      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GET_BLOCK_LOCATIONS:
    {
      final long offsetValue = offset.getValue();
      final Long lengthValue = length.getValue();
      final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
          offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
      final String js = JsonUtil.toJsonString(locatedblocks);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETFILESTATUS:
    {
      final HdfsFileStatus status = np.getFileInfo(fullpath);
      if (status == null) {
        throw new FileNotFoundException("File does not exist: " + fullpath);
      }

      final String js = JsonUtil.toJsonString(status, true);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case LISTSTATUS:
    {
      final StreamingOutput streaming = getListingStream(np, fullpath);
      return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
    }
    case GETCONTENTSUMMARY:
    {
      final ContentSummary contentsummary = np.getContentSummary(fullpath);
      final String js = JsonUtil.toJsonString(contentsummary);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETFILECHECKSUM:
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), -1L, -1L);
      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case GETDELEGATIONTOKEN:
    {
      if (delegation.getValue() != null) {
        throw new IllegalArgumentException(delegation.getName()
            + " parameter is not null.");
      }
      final Token<? extends TokenIdentifier> token = generateDelegationToken(
          namenode, ugi, renewer.getValue());
      final String js = JsonUtil.toJsonString(token);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETHOMEDIRECTORY:
    {
      final String js = JsonUtil.toJsonString(
          org.apache.hadoop.fs.Path.class.getSimpleName(),
          WebHdfsFileSystem.getHomeDirectoryString(ugi));
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETACLSTATUS: {
      AclStatus status = np.getAclStatus(fullpath);
      if (status == null) {
        throw new FileNotFoundException("File does not exist: " + fullpath);
      }

      final String js = JsonUtil.toJsonString(status);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETXATTRS: {
      List<String> names = null;
      if (xattrNames != null) {
        names = Lists.newArrayListWithCapacity(xattrNames.size());
        for (XAttrNameParam xattrName : xattrNames) {
          if (xattrName.getXAttrName() != null) {
            names.add(xattrName.getXAttrName());
          }
        }
      }
      List<XAttr> xAttrs = np.getXAttrs(fullpath, (names != null &&
          !names.isEmpty()) ? XAttrHelper.buildXAttrs(names) : null);
      final String js = JsonUtil.toJsonString(xAttrs,
          xattrEncoding.getEncoding());
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case LISTXATTRS: {
      final List<XAttr> xAttrs = np.listXAttrs(fullpath);
      final String js = JsonUtil.toJsonString(xAttrs);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
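
This second excerpt is the matching GET dispatcher: read-only operations translate into NamenodeProtocols read RPCs, while OPEN and GETFILECHECKSUM redirect to a DataNode. A short sketch of the same calls made directly (np obtained as in the sketch above; the path is illustrative):

    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    static void getOpsSketch(NamenodeProtocols np) throws Exception {
      // GETFILESTATUS: returns null rather than throwing when the path is absent
      HdfsFileStatus status = np.getFileInfo("/user/alice/file.dat");

      // GET_BLOCK_LOCATIONS: offset and length select a byte range;
      // Long.MAX_VALUE asks for the whole file, as the handler above does
      LocatedBlocks blocks =
          np.getBlockLocations("/user/alice/file.dat", 0L, Long.MAX_VALUE);

      // GETCONTENTSUMMARY: recursive file, directory, and space counts
      ContentSummary summary = np.getContentSummary("/user/alice");
    }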

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      final DeleteOpParam op,
      final RecursiveParam recursive,
      final SnapshotNameParam snapshotName
      ) throws IOException {
    final NameNode namenode = (NameNode)context.getAttribute("name.node");
    final NamenodeProtocols np = getRPCServer(namenode);

    switch(op.getValue()) {
    case DELETE: {
      final boolean b = np.delete(fullpath, recursive.getValue());
      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case DELETESNAPSHOT: {
      np.deleteSnapshot(fullpath, snapshotName.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
    }
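
The DELETE dispatcher is the simplest of the three handlers. Called directly, the two RPCs look like this (a sketch; the paths are illustrative, and the snapshot root must already be snapshottable):

    static void deleteOpsSketch(NamenodeProtocols np) throws Exception {
      // DELETE: recursive=true is required for non-empty directories;
      // returns false if the path did not exist
      boolean deleted = np.delete("/tmp/scratch", true);

      // DELETESNAPSHOT: removes one named snapshot, not the directory itself
      np.deleteSnapshot("/data/warehouse", "s20140101");
    }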

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols


      // wait for the namenode to see the corruption
      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
      CorruptFileBlocks corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.getFiles().length;
      while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode
            .listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
      }
      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
      System.out.println("2. bad fsck out: " + outStr);

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

   */
  @Test
  public void testMkdirRpcNonCanonicalPath() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    try {
      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
     
      for (String pathStr : NON_CANONICAL_PATHS) {
        try {
          nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
          fail("Did not fail when called with a non-canonicalized path: "
             + pathStr);
        } catch (InvalidPathException ipe) {
          // expected
        }
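
This test asserts that the raw RPC layer rejects non-canonical paths with InvalidPathException; the FileSystem API normally canonicalizes paths before they reach the RPC, so only a direct NamenodeProtocols call can exercise this check. NON_CANONICAL_PATHS is defined elsewhere in the test class; hypothetical values such as these illustrate the kind of paths it would contain:

    // Hypothetical examples, not copied from the test source: double
    // slashes and "." or ".." components are all non-canonical
    static final String[] NON_CANONICAL_PATHS = {
        "//foo", "/dir//file", "/dir/./file2", "/dir/../file3"
    };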

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      String hosts[] = { "foo1.example.com", "foo2.example.com" };
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).
          racks(racks).hosts(hosts).build();
      cluster.waitActive();
     
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      Assert.assertNotNull(nn);
     
      // Wait for one DataNode to register.
      // The other DataNode will not be able to register because of the rack mismatch.
      DatanodeInfo[] info;
      while (true) {
        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        Assert.assertFalse(info.length == 2);
        if (info.length == 1) {
          break;
        }
        Thread.sleep(1000);
      }
      // Set the network topology of the other node to match the network
      // topology of the node that came up.
      int validIdx = info[0].getHostName().equals(hosts[0]) ? 0 : 1;
      int invalidIdx = validIdx == 1 ? 0 : 1;
      StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
      LOG.info("datanode " + validIdx + " came up with network location " +
        info[0].getNetworkLocation());

      // Restart the DN with the invalid topology and wait for it to register.
      cluster.restartDataNode(invalidIdx);
      Thread.sleep(5000);
      while (true) {
        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        if (info.length == 2) {
          break;
        }
        if (info.length == 0) {
          LOG.info("got no valid DNs");
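
This topology test observes DataNode registration through getDatanodeReport, polling until the expected number of live nodes appears. The wait loop generalizes into a small helper (a sketch; DatanodeInfo and DatanodeReportType are imported as in the excerpt, though the package holding DatanodeReportType varies between Hadoop versions):

    static void waitForLiveNodes(NamenodeProtocols nn, int expected)
        throws Exception {
      DatanodeInfo[] live = nn.getDatanodeReport(DatanodeReportType.LIVE);
      while (live.length < expected) {
        Thread.sleep(1000);
        live = nn.getDatanodeReport(DatanodeReportType.LIVE);
      }
    }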

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

  /**
   * Test NameNode.getBlockLocations(..) on reading un-closed files.
   */
  @Test
  public void testGetBlockLocations() throws IOException {
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    final Path p = new Path(BASE_DIR, "file2.dat");
    final String src = p.toString();
    final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);

    // write a half block
    int len = BLOCK_SIZE >>> 1;
    writeFile(p, out, len);

    for(int i = 1; i < NUM_BLOCKS; ) {
      // verify consistency
      final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
      final List<LocatedBlock> blocks = lb.getLocatedBlocks();
      assertEquals(i, blocks.size());
      final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
      assertTrue(b instanceof BlockInfoUnderConstruction);
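
getBlockLocations also works on files that are still open for write, which is exactly what this test exercises: the last block of an unclosed file is reported as under construction. A caller can detect the open state without inspecting individual blocks (a sketch reusing the test's namenode and src):

    LocatedBlocks lb = namenode.getBlockLocations(src, 0L, Long.MAX_VALUE);
    if (lb.isUnderConstruction()) {
      // the file is still being written, so its visible length may grow
    }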


Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    Configuration conf = new HdfsConfiguration();
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(true).build();
     
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      URL fsName = DFSUtil.getInfoServer(
          cluster.getNameNode().getServiceRpcAddress(), conf,
          DFSUtil.getHttpClientScheme(conf)).toURL();

      // Make a finalized log on the server side.
      nn.rollEditLog();
      RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
      RemoteEditLog log = manifest.getLogs().get(0);
     
      NNStorage dstImage = Mockito.mock(NNStorage.class);
      Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
        .when(dstImage).getFiles(
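
rollEditLog finalizes the in-progress edit segment and opens a new one; getEditLogManifest then lists the finalized segments whose transactions start at or after the given id. A sketch of walking the manifest (nn as in the excerpt):

    nn.rollEditLog();  // finalize the current segment
    RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
    for (RemoteEditLog segment : manifest.getLogs()) {
      System.out.println("edits " + segment.getStartTxId()
          + " through " + segment.getEndTxId());
    }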

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of a volume -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      StorageDirectory sd1 = storage.getStorageDir(1);
     
      currentDir = sd0.getCurrentDir();
      FileUtil.setExecutable(currentDir, false);

      // Upload checkpoint when NN has a bad storage dir. This should
      // succeed and create the checkpoint in the good dir.
      secondary.doCheckpoint();
     
      GenericTestUtils.assertExists(
          new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
     
      // Restore the good dir
      FileUtil.setExecutable(currentDir, true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to both dirs
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));
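
restoreFailedStorage, used in the recovery step above, is a string-argument RPC rather than a boolean setter. A sketch of the three accepted values (as documented on ClientProtocol):

    boolean flag = nn.restoreFailedStorage("check");  // query the current setting
    nn.restoreFailedStorage("true");   // restore failed dirs when they recover
    nn.restoreFailedStorage("false");  // leave failed dirs out of service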

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of its only name dir -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
      currentDir = sd0.getCurrentDir();
      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

      // Try to upload checkpoint -- this should fail since there are no
      // valid storage dirs
      try {
        secondary.doCheckpoint();
        fail("Did not fail to checkpoint when there are no valid storage dirs");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "No targets in destination storage", ioe);
      }
     
      // Restore the good dir
      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to the restored name dir
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));
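
Both checkpoint tests reach the NameNode's storage directories through NNStorage. A sketch of enumerating the directories and their types (assuming a running MiniDFSCluster, as in the excerpts):

    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (int i = 0; i < storage.getNumStorageDirs(); i++) {
      StorageDirectory sd = storage.getStorageDir(i);
      System.out.println(sd.getRoot() + " type=" + sd.getStorageDirType());
    }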