Examples of NamenodeProtocols
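
NamenodeProtocols is the aggregate RPC interface implemented by the NameNode's RPC server; it extends ClientProtocol, DatanodeProtocol, and NamenodeProtocol (among other protocols), so one handle can issue any NameNode RPC. The excerpts below, drawn from Hadoop's tests and WebHDFS server code, obtain that handle in one of two ways: cluster.getNameNodeRpc() or namenode.getRpcServer(). As a minimal orientation sketch (not one of the excerpts; the single-datanode MiniDFSCluster setup is an assumption that mirrors the pattern used throughout this page):

    // imports assumed: org.apache.hadoop.conf.Configuration,
    // org.apache.hadoop.hdfs.HdfsConfiguration, org.apache.hadoop.hdfs.MiniDFSCluster,
    // org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Option 1: straight from the mini cluster.
      NamenodeProtocols viaCluster = cluster.getNameNodeRpc();
      // Option 2: from the NameNode instance itself (as several excerpts below do).
      NamenodeProtocols viaNameNode = cluster.getNameNode().getRpcServer();
      // Either handle exposes ClientProtocol methods such as getFileInfo().
      System.out.println(viaCluster.getFileInfo("/"));
      System.out.println(viaNameNode.getFileInfo("/"));
    } finally {
      cluster.shutdown();
    }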


Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

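    // Excerpt: centralized cache management over NameNode RPC — send a bogus
    // cacheReport, create a cache pool, add a cache directive per file, verify
    // per-datanode cache statistics, then remove the directives again.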
      }
    }, 500, 60000); // likely the tail of a GenericTestUtils.waitFor(supplier, 500, 60000) call, cut off by this excerpt

    // Send a cache report referring to a bogus block.  It is important that
    // the NameNode be robust against this.
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    DataNode dn0 = cluster.getDataNodes().get(0);
    String bpid = cluster.getNamesystem().getBlockPoolId();
    LinkedList<Long> bogusBlockIds = new LinkedList<Long> ();
    bogusBlockIds.add(999999L);
    nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);

    Path rootDir = helper.getDefaultWorkingDirectory(dfs);
    // Create the pool
    final String pool = "friendlyPool";
    nnRpc.addCachePool(new CachePoolInfo(pool));
    // Create some test files
    final int numFiles = 2;
    final int numBlocksPerFile = 2;
    final List<String> paths = new ArrayList<String>(numFiles);
    for (int i=0; i<numFiles; i++) {
      Path p = new Path(rootDir, "testCachePaths-" + i);
      FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile,
          (int)BLOCK_SIZE);
      paths.add(p.toUri().getPath());
    }
    // Check the initial statistics at the namenode
    waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
    // Cache and check each path in sequence
    int expected = 0;
    for (int i=0; i<numFiles; i++) {
      CacheDirectiveInfo directive =
          new CacheDirectiveInfo.Builder().
            setPath(new Path(paths.get(i))).
            setPool(pool).
            build();
      nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
      expected += numBlocksPerFile;
      waitForCachedBlocks(namenode, expected, expected,
          "testWaitForCachedReplicas:1");
    }

    // Check that the datanodes have the right cache values
    DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
    assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
    long totalUsed = 0;
    for (DatanodeInfo dn : live) {
      final long cacheCapacity = dn.getCacheCapacity();
      final long cacheUsed = dn.getCacheUsed();
      final long cacheRemaining = dn.getCacheRemaining();
      assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
      assertEquals("Capacity not equal to used + remaining",
          cacheCapacity, cacheUsed + cacheRemaining);
      assertEquals("Remaining not equal to capacity - used",
          cacheCapacity - cacheUsed, cacheRemaining);
      totalUsed += cacheUsed;
    }
    assertEquals("Unexpected total cached bytes",
        expected * BLOCK_SIZE, totalUsed);

    // Uncache and check each path in sequence
    RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null);
    for (int i=0; i<numFiles; i++) {
      CacheDirectiveEntry entry = entries.next();
      nnRpc.removeCacheDirective(entry.getInfo().getId());
      expected -= numBlocksPerFile;
      waitForCachedBlocks(namenode, expected, expected,
          "testWaitForCachedReplicas:2");
    }
  }

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

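      // Excerpt: poll listCorruptFileBlocks() until the NameNode notices the
      // injected corruption, then run fsck with -list-corruptfileblocks.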
          }
        }
      }
      // (the braces above close corruption-injection code cut off by this excerpt)

      // wait for the namenode to see the corruption
      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
      CorruptFileBlocks corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.getFiles().length;
      while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode
            .listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
      }
      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
      System.out.println("2. bad fsck out: " + outStr);

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

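    // Excerpt: call create() directly over NameNode RPC with non-canonical
    // paths and expect an InvalidPathException.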
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();

      for (String pathStr : NON_CANONICAL_PATHS) {
        System.out.println("Creating " + pathStr + " by " + method);
        switch (method) {
        case DIRECT_NN_RPC:
          try {
            nnrpc.create(pathStr, new FsPermission((short)0755), "client",
                new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
                true, (short)1, 128*1024*1024L);
            fail("Should have thrown exception when creating '"
                + pathStr + "'" + " by " + method);
          } catch (InvalidPathException ipe) {

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

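      // Excerpt: block access tokens — shorten the token lifetime, cache
      // tokens in open input streams, and fetch a block token via
      // getBlockLocations() to check its expiration.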
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
      cluster.waitActive();
      assertEquals(numDataNodes, cluster.getDataNodes().size());

      final NameNode nn = cluster.getNameNode();
      final NamenodeProtocols nnProto = nn.getRpcServer();
      final BlockManager bm = nn.getNamesystem().getBlockManager();
      final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();

      // set a short token lifetime (1 second) initially
      SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);

      Path fileToRead = new Path(FILE_TO_READ);
      FileSystem fs = cluster.getFileSystem();
      createFile(fs, fileToRead);

      /*
       * setup for testing expiration handling of cached tokens
       */

      // read using blockSeekTo(). Acquired tokens are cached in in1
      FSDataInputStream in1 = fs.open(fileToRead);
      assertTrue(checkFile1(in1));
      // read using blockSeekTo(). Acquired tokens are cached in in2
      FSDataInputStream in2 = fs.open(fileToRead);
      assertTrue(checkFile1(in2));
      // read using fetchBlockByteRange(). Acquired tokens are cached in in3
      FSDataInputStream in3 = fs.open(fileToRead);
      assertTrue(checkFile2(in3));

      /*
       * testing READ interface on DN using a BlockReader
       */
      // a DFSClient is constructed against the NameNode and closed right away;
      // the block locations below are fetched straight from the NameNode RPC
      DFSClient client = null;
      try {
        client = new DFSClient(new InetSocketAddress("localhost",
          cluster.getNameNodePort()), conf);
      } finally {
        if (client != null) client.close();
      }
      List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(
          FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
      LocatedBlock lblock = locatedBlocks.get(0); // first block
      Token<BlockTokenIdentifier> myToken = lblock.getBlockToken();
      // verify token is not expired
      assertFalse(SecurityTestUtil.isBlockTokenExpired(myToken));

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

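  // Excerpt: wrap the NameNode RPC handle in a Mockito spy so a DFSClient's
  // lease-related calls can be intercepted in a lease-abort test.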
  public void testLeaseAbort() throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
      NamenodeProtocols spyNN = spy(preSpyNN);

      DFSClient dfs = new DFSClient(null, spyNN, conf, null);
      byte[] buf = new byte[1024];

      FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

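      // Excerpt: register a datanode over RPC, then re-register it with a
      // changed storage ID and verify the NameNode still reports one node.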
          .build();
      InetSocketAddress addr = new InetSocketAddress(
        "localhost",
        cluster.getNameNodePort());
      DFSClient client = new DFSClient(addr, conf);
      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();

      // register a datanode
      DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
          "fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT,
          DN_IPC_PORT);
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
          .getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo)
          .getLayoutVersion();
      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
      rpcServer.registerDatanode(dnReg);

      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
      assertEquals("Expected a registered datanode", 1, report.length);

      // register the same datanode again with a different storage ID
      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
          "changed-fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT,
          DN_INFO_SECURE_PORT, DN_IPC_PORT);
      dnReg = new DatanodeRegistration(dnId,
          mockStorageInfo, null, VersionInfo.getVersion());
      rpcServer.registerDatanode(dnReg);

      report = client.datanodeReport(DatanodeReportType.ALL);
      assertEquals("Datanode with changed storage ID not recognized",
          1, report.length);
    } finally {

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

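    // Excerpt: registerDatanode() software-version checks — equal or newer
    // DN versions succeed; a version below the NN's minimum throws
    // IncorrectVersionException.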
    try {
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)
          .build();
     
      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
     
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
     
      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
      doReturn(123).when(mockDnReg).getXferPort();
      doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
     
      // Should succeed when software versions are the same.
      doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should succeed when software version of DN is above minimum required by NN.
      doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should fail when software version of DN is below minimum required by NN.
      doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail("Should not have been able to register DN with too-low version.");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains(
            "The reported DataNode version is too low", ive);
        LOG.info("Got expected exception", ive);

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

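    // Excerpt: registerDatanode() version/CTime checks — matching software
    // versions register even with differing CTimes; a mismatched version with
    // a differing CTime is rejected with IncorrectVersionException.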
    try {
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(0)
          .build();
     
      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
     
      long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
      StorageInfo mockStorageInfo = mock(StorageInfo.class);
      doReturn(nnCTime).when(mockStorageInfo).getCTime();
     
      DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
      doReturn(HdfsConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
      doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
      doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
     
      // Should succeed when software versions are the same and CTimes are the
      // same.
      doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
      doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
      doReturn(123).when(mockDnReg).getXferPort();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should succeed when software versions are the same and CTimes are
      // different.
      doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
      rpcServer.registerDatanode(mockDnReg);
     
      // Should fail when software version of DN is different from NN and CTimes
      // are different.
      doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
      try {
        rpcServer.registerDatanode(mockDnReg);
        fail("Should not have been able to register DN with different software" +
            " versions and CTimes");
      } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains(
            "does not match CTime of NN", ive);

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

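    // Excerpt: WebHDFS datanode selection — use getFileInfo() and
    // getBlockLocations() to pick a datanode that holds a replica of the
    // requested file.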
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      //choose a datanode containing a replica
      final NamenodeProtocols np = namenode.getRpcServer();
      final HdfsFileStatus status = np.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException("Offset=" + openOffset
              + " out of the range [0, " + len + "); " + op + ", path=" + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
        final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0).getLocations(), false, conf);
        }
      }

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

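    // Excerpt: WebHDFS PUT dispatch — CREATE redirects the client to a
    // datanode, while MKDIRS, CREATESYMLINK, RENAME, SETREPLICATION,
    // SETOWNER, SETPERMISSION, SETTIMES, and delegation-token renew/cancel
    // map onto the corresponding NameNode RPC calls.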
      final TokenArgumentParam delegationTokenArgument
      ) throws IOException, URISyntaxException {

    final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
    final NameNode namenode = (NameNode)context.getAttribute("name.node");
    final NamenodeProtocols np = namenode.getRpcServer();

    switch(op.getValue()) {
    case CREATE:
    {
      final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
          fullpath, op.getValue(), -1L, blockSize.getValue(conf),
          permission, overwrite, bufferSize, replication, blockSize);
      return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case MKDIRS:
    {
      final boolean b = np.mkdirs(fullpath, permission.getFsPermission(), true);
      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case CREATESYMLINK:
    {
      np.createSymlink(destination.getValue(), fullpath,
          PermissionParam.getDefaultFsPermission(), createParent.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case RENAME:
    {
      final EnumSet<Options.Rename> s = renameOptions.getValue();
      if (s.isEmpty()) {
        final boolean b = np.rename(fullpath, destination.getValue());
        final String js = JsonUtil.toJsonString("boolean", b);
        return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
      } else {
        np.rename2(fullpath, destination.getValue(),
            s.toArray(new Options.Rename[s.size()]));
        return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
      }
    }
    case SETREPLICATION:
    {
      final boolean b = np.setReplication(fullpath, replication.getValue(conf));
      final String js = JsonUtil.toJsonString("boolean", b);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case SETOWNER:
    {
      if (owner.getValue() == null && group.getValue() == null) {
        throw new IllegalArgumentException("Both owner and group are empty.");
      }

      np.setOwner(fullpath, owner.getValue(), group.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETPERMISSION:
    {
      np.setPermission(fullpath, permission.getFsPermission());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case SETTIMES:
    {
      np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case RENEWDELEGATIONTOKEN:
    {
      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
      token.decodeFromUrlString(delegationTokenArgument.getValue());
      final long expiryTime = np.renewDelegationToken(token);
      final String js = JsonUtil.toJsonString("long", expiryTime);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case CANCELDELEGATIONTOKEN:
    {
      final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
      token.decodeFromUrlString(delegationTokenArgument.getValue());
      np.cancelDelegationToken(token);
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");
    }