Package org.apache.hadoop.hdfs.server.common.Storage

Examples of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory

The snippets below are drawn from the HDFS NameNode and its test suite. They show the common StorageDirectory operations: resolving files inside a directory, iterating over active and failed directories, and taking and releasing the directory lock.




      // now verify that image and edits are created in the same directory
      FSImage image = nn.getFSImage();
      StorageDirectory sd = image.getStorage().getStorageDir(0); // only one
      assertEquals(NameNodeDirType.IMAGE_AND_EDITS, sd.getStorageDirType());
      File imf = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE, 0);
      File edf = NNStorage.getStorageFile(sd, NameNodeFile.EDITS, 0);
      LOG.info("--image file " + imf.getAbsolutePath() + "; len = " + imf.length());


    Map<String, Map<File, StorageDirType>> statusMap =
        new HashMap<String, Map<File, StorageDirType>>();
   
    Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
    for (Iterator<StorageDirectory> it
        = getFSImage().getStorage().dirIterator(); it.hasNext();) {
      StorageDirectory st = it.next();
      activeDirs.put(st.getRoot(), st.getStorageDirType());
    }
    statusMap.put("active", activeDirs);
   
    List<Storage.StorageDirectory> removedStorageDirs
        = getFSImage().getStorage().getRemovedStorageDirs();
    Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
    for (StorageDirectory st : removedStorageDirs) {
      failedDirs.put(st.getRoot(), st.getStorageDirType());
    }
    statusMap.put("failed", failedDirs);
   
    return JSON.toString(statusMap);
  }
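The result is a two-level JSON object keyed by directory root. A minimal usage sketch; it assumes, as in stock HDFS, that the method above backs getNameDirStatuses() on the NameNodeMXBean, and the path shown is hypothetical:

      String json = nn.getNameDirStatuses();
      // e.g. {"active":{"/data/dfs/name":"IMAGE_AND_EDITS"},"failed":{}}
      LOG.info(json);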

          files.add(NNStorage.getFinalizedEditsFileName(
              Long.parseLong(m.group(1)),
              Long.parseLong(m.group(2))));
        }
      }
      StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
          NameNodeDirType.EDITS, false,
          files.toArray(new String[0]));
      sds.add(sd);
      URI u = URI.create("file:///storage" + Math.random());
      Mockito.doReturn(sd).when(storage).getStorageDirectory(u);
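getFinalizedEditsFileName simply formats the standard name of a finalized edits segment from its first and last transaction ids, which is what lets the mocked directory look like a real one:

      String name = NNStorage.getFinalizedEditsFileName(1, 100);
      // -> "edits_0000000000000000001-0000000000000000100"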

          .build();
      cluster.waitActive();
      fileSys = cluster.getFileSystem();
      image = cluster.getNameNode().getFSImage();
      assertFalse(fileSys.exists(file1));
      StorageDirectory sd = image.getStorage().getStorageDir(0);
     
      File latestImageBeforeCheckpoint = FSImageTestUtil.findLatestImageFile(sd);
      long fsimageLength = latestImageBeforeCheckpoint.length();
      //
      // Make the checkpoint
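findLatestImageFile returns the fsimage with the highest transaction id in the directory, which is what the checkpoint below will supersede. A sketch of the comparison being set up (assuming a SecondaryNameNode named secondary, as elsewhere on this page):

      secondary.doCheckpoint();
      File latestAfter = FSImageTestUtil.findLatestImageFile(sd);
      // The checkpoint should have produced an image with a higher txid.
      assertFalse(latestAfter.getName().equals(
          latestImageBeforeCheckpoint.getName()));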

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
   
    // Start a NN, and verify that lock() fails in all of the configured
    // directories
    StorageDirectory savedSd = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      for (StorageDirectory sd : storage.dirIterable(null)) {
        assertLockFails(sd);
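assertLockFails is a helper defined in the full test class, not shown on this page; a sketch of its likely shape, built only from calls that appear in the other snippets here (lock() on an in-use directory must throw, unless the filesystem does not support locking at all):

      static void assertLockFails(StorageDirectory sd) throws IOException {
        try {
          sd.lock();
          // No exception: acceptable only where locking is unsupported.
          assertFalse(sd.isLockSupported());
          sd.unlock();
        } catch (IOException ioe) {
          GenericTestUtils.assertExceptionContains("already locked", ioe);
        }
      }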

        editsDir.getAbsolutePath());
    MiniDFSCluster cluster = null;
   
    // Start a NN, and verify that lock() fails in all of the configured
    // directories
    StorageDirectory savedSd = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
          .numDataNodes(0).build();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
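dirIterable(null), used in the previous snippet, visits every storage directory; passing a NameNodeDirType, as here, restricts iteration to directories of that type. The same filter works for image directories:

      for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
        assertLockFails(sd);
      }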

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      StorageDirectory savedSd = null;
      // Start a secondary NN, then make sure that all of its storage
      // dirs got locked.
      secondary = startSecondaryNameNode(conf);
     
      NNStorage storage = secondary.getFSImage().getStorage();
      for (StorageDirectory sd : storage.dirIterable(null)) {
        assertLockFails(sd);
        savedSd = sd;
      }
      LOG.info("===> Shutting down first 2NN");
      secondary.shutdown();
      secondary = null;

      LOG.info("===> Locking a dir, starting second 2NN");
      // Lock one of its dirs, make sure it fails to start
      LOG.info("Trying to lock" + savedSd);
      savedSd.lock();
      try {
        secondary = startSecondaryNameNode(conf);
        assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
            savedSd.isLockSupported());
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("already locked", ioe);
      } finally {
        savedSd.unlock();
      }
     
    } finally {
      cleanup(secondary);
      secondary = null;
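Stripped of the test scaffolding, the lock lifecycle this snippet exercises is short; a minimal sketch, where sd is any StorageDirectory on a filesystem that supports locking:

      sd.lock();       // takes <root>/in_use.lock
      try {
        // ... exclusive use of the storage directory ...
      } finally {
        sd.unlock();   // releases and removes the lock file
      }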

   */
  @Test
  public void testStorageAlreadyLockedErrorMessage() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    StorageDirectory savedSd = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      for (StorageDirectory sd : storage.dirIterable(null)) {
        assertLockFails(sd);
        savedSd = sd;
      }
     
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
          LogFactory.getLog(Storage.class));
      try {
        // try to lock the storage that's already locked
        savedSd.lock();
        fail("Namenode should not be able to lock a storage that is already locked");
      } catch (IOException ioe) {
        // the lock file cannot be read on Windows, so the message will not
        // include the JVM name
        String lockingJvmName = Path.WINDOWS ? "" :
          " " + ManagementFactory.getRuntimeMXBean().getName();

      // Now primary NN experiences failure of a volume -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      StorageDirectory sd1 = storage.getStorageDir(1);
     
      currentDir = sd0.getCurrentDir();
      FileUtil.setExecutable(currentDir, false);

      // Upload checkpoint when NN has a bad storage dir. This should
      // succeed and create the checkpoint in the good dir.
      secondary.doCheckpoint();
     
      GenericTestUtils.assertExists(
          new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
     
      // Restore the good dir
      FileUtil.setExecutable(currentDir, true);
      nn.restoreFailedStorage("true");
      nn.rollEditLog();
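restoreFailedStorage takes the same string argument as the dfsadmin command ("true", "false", or "check"), and the subsequent rollEditLog() gives NNStorage a chance to bring the restored directory back into service. The rough command-line equivalent of the two RPC calls above:

      hdfs dfsadmin -restoreFailedStorage true
      hdfs dfsadmin -rollEdits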

      // Now primary NN experiences failure of its only name dir -- fake by
      // setting its current dir to a-x permissions
      NamenodeProtocols nn = cluster.getNameNodeRpc();
      NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
      StorageDirectory sd0 = storage.getStorageDir(0);
      assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
      currentDir = sd0.getCurrentDir();
      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));

      // Try to upload checkpoint -- this should fail since there are no
      // valid storage dirs
      try {
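The try block is truncated here; the upload attempt is expected to throw because no storage directory is usable. Note that chmod-based failure injection needs a finally block to restore permissions, or later cleanup cannot delete the directory; a sketch of that tail:

      } finally {
        if (currentDir != null) {
          FileUtil.chmod(currentDir.getAbsolutePath(), "755");
        }
      }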
