Examples of NamenodeProtocols
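
The snippets below are drawn from HDFS unit tests and show typical ways to obtain a NamenodeProtocols handle from an in-process MiniDFSCluster and to invoke NameNode RPCs such as saveNamespace, rollEditLog, getBlockLocations, getEditLogManifest, and restoreFailedStorage. As a minimal, self-contained sketch (class name illustrative, assuming the hadoop-hdfs test artifacts are on the classpath), the two ways these tests get hold of the RPC interface look like this:

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.hdfs.server.namenode.NameNode;
  import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

  public class NamenodeProtocolsHandleSketch {
    public static void main(String[] args) throws IOException {
      Configuration conf = new HdfsConfiguration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(1).format(true).build();
      try {
        cluster.waitActive();

        // Way 1: directly from the mini cluster.
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();

        // Way 2: from the NameNode's RPC server object.
        NameNode nn = cluster.getNameNode();
        NamenodeProtocols sameRpc = nn.getRpcServer();

        // Any NameNode RPC can now be issued in-process, for example:
        nnRpc.rollEditLog();
      } finally {
        cluster.shutdown();
      }
    }
  }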


Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN saves namespace 3 times
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
      for (int i = 0; i < 3; i++) {
        nn.saveNamespace();
      }
      nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
     
      // Now the secondary tries to checkpoint again with its
      // old image in memory.
      secondary.doCheckpoint();
     
View Full Code Here
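
For context on the snippet above: saveNamespace() is accepted only while the NameNode is in safe mode, which is why the test brackets the calls with setSafeMode(). A minimal sketch of the same pattern with a try/finally guard, assuming a running MiniDFSCluster named cluster (this branch's setSafeMode overload takes an extra isChecked flag):

      NamenodeProtocols nn = cluster.getNameNodeRpc();
      // saveNamespace() is rejected unless the NameNode is in safe mode.
      nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
      try {
        nn.saveNamespace();  // writes a fresh fsimage to the image directories
      } finally {
        nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
      }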

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

  /**
   * Trigger a failure against the replicas of the given file.
   * @param path path of the file whose block replicas are targeted
   * @param size length of the file in bytes
   * @throws IOException if the NameNode lookup fails
   */
  private void triggerFailure(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();
   
    for (LocatedBlock lb : locatedBlocks) {
      // assumes replication >= 2: act on the second replica's datanode
      DatanodeInfo dinfo = lb.getLocations()[1];
      ExtendedBlock b = lb.getBlock();
      try {
View Full Code Here
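
The truncated triggerFailure() above asks the NameNode for the file's block locations and then acts on the second replica of each block. A minimal sketch of walking the same result, using only accessors that already appear in these snippets (the helper name printBlockReplicas is made up for illustration):

  // Hypothetical helper: list every replica location of a file's blocks.
  private void printBlockReplicas(String path, long size) throws IOException {
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks =
        nn.getBlockLocations(path, 0, size).getLocatedBlocks();
    for (LocatedBlock lb : locatedBlocks) {
      ExtendedBlock b = lb.getBlock();
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("block " + b.getBlockId() + " on " + dn);
      }
    }
  }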

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

  /**
   * Count the blocks of the given file as reported by the NameNode.
   */
  private int countNNBlocks(Map<String, BlockLocs> map, String path, long size)
    throws IOException {
    int total = 0;
   
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();
    //System.out.println("Number of blocks: " + locatedBlocks.size());
       
    for(LocatedBlock lb : locatedBlocks) {
      String blockId = ""+lb.getBlock().getBlockId();
      //System.out.print(blockId + ": ");
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    SecondaryNameNode secondary = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
          .format(true).build();
      NameNode nn = cluster.getNameNode();
      NamenodeProtocols nnRpc = nn.getRpcServer();

      secondary = startSecondaryNameNode(conf);
      // prepare checkpoint image
      secondary.doCheckpoint();
      CheckpointSignature sig = nnRpc.rollEditLog();
      // manipulate the CheckpointSignature fields
      sig.setBlockpoolID("somerandomebpid");
      sig.clusterID = "somerandomcid";
      try {
        sig.validateStorageInfo(nn.getFSImage()); // this should fail
View Full Code Here
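
The try block above is cut off right after the call that is expected to fail. A minimal sketch of how such an expected-failure check is typically completed (fail() assumes the usual static JUnit import; the exact exception message is deliberately not asserted here):

      try {
        sig.validateStorageInfo(nn.getFSImage()); // this should fail
        fail("validateStorageInfo accepted a signature with doctored IDs");
      } catch (IOException expected) {
        // Expected: the block pool ID and cluster ID in the signature no
        // longer match the NameNode's storage, so validation is rejected.
      }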

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    Configuration conf = new HdfsConfiguration();

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();

    SecondaryNameNode secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nnRpc.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      sig.validateStorageInfo(nn.getFSImage()); // this should fail
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

    Configuration conf = new HdfsConfiguration();
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(true).build();
     
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      String fsName = NetUtils.getHostPortString(
          cluster.getNameNode().getHttpAddress());

      // Make a finalized log on the server side.
      nn.rollEditLog();
      RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
      RemoteEditLog log = manifest.getLogs().get(0);
     
      NNStorage dstImage = Mockito.mock(NNStorage.class);
      Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
        .when(dstImage).getFiles(
View Full Code Here
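
After rollEditLog(), getEditLogManifest(1) returns the finalized edit-log segments starting from transaction ID 1. A minimal sketch of inspecting that manifest (getStartTxId/getEndTxId are assumed to be the RemoteEditLog accessors in this branch):

      // Each finalized segment is described by a transaction-ID range.
      for (RemoteEditLog seg : manifest.getLogs()) {
        System.out.println("edits segment: txid " + seg.getStartTxId()
            + " through " + seg.getEndTxId());
      }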

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of a volume -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      StorageDirectory sd1 = storage.getStorageDir(1);
     
      currentDir = sd0.getCurrentDir();
      currentDir.setExecutable(false);

      // Upload checkpoint when NN has a bad storage dir. This should
      // succeed and create the checkpoint in the good dir.
      secondary.doCheckpoint();
     
      GenericTestUtils.assertExists(
          new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
     
      // Restore the good dir
      currentDir.setExecutable(true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to both dirs
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));
View Full Code Here
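
restoreFailedStorage("true") above enables automatic restore of previously failed storage directories, and the test then rolls the edit log so the restored directory is picked up before the next checkpoint. A minimal sketch of the three argument values accepted by this RPC (as documented for ClientProtocol):

      // "check" only reports the current setting; "true"/"false" turn
      // automatic restore of failed storage directories on or off.
      boolean restoreEnabled = nn.restoreFailedStorage("check");
      nn.restoreFailedStorage("true");
      nn.rollEditLog();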

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of its only name dir -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
      currentDir = sd0.getCurrentDir();
      currentDir.setExecutable(false);

      // Try to upload checkpoint -- this should fail since there are no
      // valid storage dirs
      try {
        secondary.doCheckpoint();
        fail("Did not fail to checkpoint when there are no valid storage dirs");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "No targets in destination storage", ioe);
      }
     
      // Restore the good dir
      currentDir.setExecutable(true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to the restored name dir
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN saves namespace 3 times
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      for (int i = 0; i < 3; i++) {
        nn.saveNamespace();
      }
      nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     
      // Now the secondary tries to checkpoint again with its
      // old image in memory.
      secondary.doCheckpoint();
     
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

  public void testLeaseAbort() throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    try {
      cluster.waitActive();
      NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
      NamenodeProtocols spyNN = spy(preSpyNN);

      DFSClient dfs = new DFSClient(null, spyNN, conf, null);
      byte[] buf = new byte[1024];

      FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
View Full Code Here
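
The Mockito spy wrapped around the NameNode RPC object above lets the test intercept individual RPCs issued by the DFSClient while all other calls pass through to the real NameNode. A minimal sketch of injecting a fault through that spy (assuming the usual static Mockito imports and the single-argument renewLease(String) signature used by this code base):

      // Make lease renewals fail so the client's lease can expire, while
      // every other NameNode RPC still reaches the real NameNode.
      doThrow(new IOException("injected renewLease failure"))
          .when(spyNN).renewLease(anyString());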