Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols

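The snippets below come from HDFS unit tests. Each one obtains a NamenodeProtocols handle either from the MiniDFSCluster test harness via getNameNodeRpc(), or from a running NameNode via getRpcServer(). The following minimal sketch shows that setup in isolation; it assumes the HDFS test artifact that provides MiniDFSCluster is on the classpath, and the class name NamenodeProtocolsExample is made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

    public class NamenodeProtocolsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1).format(true).build();
        try {
          cluster.waitActive();

          // Option 1: ask the mini cluster for the NameNode's RPC facade.
          NamenodeProtocols nnRpc = cluster.getNameNodeRpc();

          // Option 2: go through the NameNode instance itself.
          NameNode nn = cluster.getNameNode();
          NamenodeProtocols viaNameNode = nn.getRpcServer();

          // NamenodeProtocols aggregates the client, datanode and namenode
          // protocols, so calls such as rollEditLog() are available directly.
          nnRpc.rollEditLog();
        } finally {
          cluster.shutdown();
        }
      }
    }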

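    // Scenario: roll the edit log to get a CheckpointSignature, corrupt its block
    // pool and cluster IDs, and expect validation against the NameNode's storage
    // info to fail.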
    SecondaryNameNode secondary = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
          .format(true).build();
      NameNode nn = cluster.getNameNode();
      NamenodeProtocols nnRpc = nn.getRpcServer();

      secondary = startSecondaryNameNode(conf);
      // prepare checkpoint image
      secondary.doCheckpoint();
      CheckpointSignature sig = nnRpc.rollEditLog();
      // manipulate the CheckpointSignature fields
      sig.setBlockpoolID("somerandomebpid");
      sig.clusterID = "somerandomcid";
      try {
        sig.validateStorageInfo(nn.getFSImage()); // this should fail


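    // Scenario: roll the edit log so a finalized segment exists, fetch its manifest
    // over RPC, and mock the destination NNStorage ahead of an edit-log transfer.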
    Configuration conf = new HdfsConfiguration();
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
          .format(true).build();
     
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      String fsName = NetUtils.getHostPortString(
          cluster.getNameNode().getHttpAddress());

      // Make a finalized log on the server side.
      nn.rollEditLog();
      RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
      RemoteEditLog log = manifest.getLogs().get(0);
     
      NNStorage dstImage = Mockito.mock(NNStorage.class);
      Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
        .when(dstImage).getFiles(

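      // Scenario: fail one of two NameNode storage directories, verify the
      // secondary's checkpoint still lands in the healthy directory, then restore
      // the failed directory and checkpoint into both.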
      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of a volume -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      StorageDirectory sd1 = storage.getStorageDir(1);
     
      currentDir = sd0.getCurrentDir();
      FileUtil.setExecutable(currentDir, false);

      // Upload checkpoint when NN has a bad storage dir. This should
      // succeed and create the checkpoint in the good dir.
      secondary.doCheckpoint();
     
      GenericTestUtils.assertExists(
          new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
     
      // Restore the good dir
      FileUtil.setExecutable(currentDir, true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to both dirs
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));

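      // Scenario: make the NameNode's only image directory inaccessible, verify the
      // checkpoint upload fails, then restore the directory and checkpoint again.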
      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN experiences failure of its only name dir -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
      currentDir = sd0.getCurrentDir();
      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

      // Try to upload checkpoint -- this should fail since there are no
      // valid storage dirs
      try {
        secondary.doCheckpoint();
        fail("Did not fail to checkpoint when there are no valid storage dirs");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "No targets in destination storage", ioe);
      }
     
      // Restore the good dir
      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
      nn.restoreFailedStorage("true");
      nn.rollEditLog();

      // Checkpoint again -- this should upload to the restored name dir
      secondary.doCheckpoint();
     
      assertNNHasCheckpoints(cluster, ImmutableList.of(8));

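      // Scenario: save the namespace several times on the NameNode, then have the
      // secondary checkpoint again with its stale in-memory image.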
      // Checkpoint once
      secondary.doCheckpoint();

      // Now primary NN saves namespace 3 times
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
      for (int i = 0; i < 3; i++) {
        nn.saveNamespace();
      }
      nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
     
      // Now the secondary tries to checkpoint again with its
      // old image in memory.
      secondary.doCheckpoint();
     

          }
        }
      }

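      // Scenario: poll the NameNode until it reports corrupt blocks under
      // /corruptData, then run fsck with -list-corruptfileblocks.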
      // wait for the namenode to see the corruption
      final NamenodeProtocols namenode = cluster.getNameNodeRpc();
      CorruptFileBlocks corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.getFiles().length;
      while (numCorrupt == 0) {
        Thread.sleep(1000);
        corruptFileBlocks = namenode
            .listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.getFiles().length;
      }
      outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
      System.out.println("2. bad fsck out: " + outStr);

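      // Scenario: record the NameNode's namespace, cluster, and block pool IDs via
      // versionRequest(), write files under /TestUpgrade, save the namespace, then
      // write more files so edits exist beyond the saved image.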
                                   .format(false)
                                   .manageDataDfsDirs(false)
                                   .manageNameDfsDirs(false)
                                   .build();
       
      NamenodeProtocols namenode = cluster.getNameNodeRpc();
      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
      namenodeStorageClusterID = namenode.versionRequest().getClusterID();
      namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
     
      FileSystem fs = FileSystem.get(config);
      Path baseDir = new Path("/TestUpgrade");
      fs.mkdirs(baseDir);
     
      // write some files
      int bufferSize = 4096;
      byte[] buffer = new byte[bufferSize];
      for(int i=0; i < bufferSize; i++)
        buffer[i] = (byte)('0' + i % 50);
      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
     
      // save image
      namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
      namenode.saveNamespace();
      namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
     
      // write more files
      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
      bpid = cluster.getNamesystem(0).getBlockPoolId();

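    // Scenario: build an uncompressed fsimage by creating /test and saving the
    // namespace, then stop the NameNode before re-reading the image with
    // compression enabled.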
    // create an uncompressed image
    LOG.info("Create an uncompressed fsimage");
    NameNode namenode = new NameNode(conf);
    namenode.getNamesystem().mkdirs("/test",
        new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace();
    namenode.stop();
    namenode.join();

    // compress image using default codec
    LOG.info("Read an uncompressed image and store it compressed using the default codec.");

    checkNameSpace(conf);
  }

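  // Helper: start a NameNode on the existing storage, verify /test is still a
  // directory, then save the namespace from safe mode and shut the NameNode down.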
  private void checkNameSpace(Configuration conf) throws IOException {
    NameNode namenode = new NameNode(conf);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace();
    namenode.stop();
    namenode.join();
  }

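      // Scenario: restart the NameNode and wait up to five heartbeat intervals for
      // the live-datanode report to match the expected number of datanodes.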
      cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(numDatanodes).setupHostsFile(true).build();
      cluster.waitActive();
 
      cluster.restartNameNode();
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      assertNotNull(nn);
      assertTrue(cluster.isDataNodeUp());
     
      DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      for (int i = 0 ; i < 5 && info.length != numDatanodes; i++) {
        Thread.sleep(HEARTBEAT_INTERVAL * 1000);
        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
      }
      assertEquals("Number of live nodes should be "+numDatanodes, numDatanodes,
          info.length);
     
    } catch (IOException e) {
