Package org.apache.hadoop.hdfs.server.common

Examples of org.apache.hadoop.hdfs.server.common.StorageInfo
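
StorageInfo is the common container for the version metadata that NameNode and
DataNode storage directories share: the layout version, namespace ID, cluster ID,
and filesystem creation time (cTime). The snippets below construct it directly in
tests, derive it from a NamespaceInfo during the DataNode handshake, write it into
VERSION files to simulate upgrade and rollback scenarios, and round-trip it through
protobuf via PBHelper. As a minimal sketch of the four-argument constructor and the
getters used throughout these examples (the concrete values are made up for
illustration):

  // Hypothetical values. HDFS layout versions are negative and decrease as the
  // on-disk format evolves, so "newer" means "more negative".
  StorageInfo info = new StorageInfo(-51, 12345, "CID-example", 1000L);

  int layoutVersion = info.getLayoutVersion();  // -51
  int namespaceId   = info.getNamespaceID();    // 12345
  String clusterId  = info.getClusterID();      // "CID-example"
  long cTime        = info.getCTime();          // 1000L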


  @Test
  public void testRollback() throws Exception {
    File[] baseDirs;
    UpgradeUtilities.initialize();
   
    StorageInfo storageInfo = null;
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new HdfsConfiguration();
      conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);     
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
     
      log("Normal NameNode rollback", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      checkResult(NAME_NODE, nameNodeDirs);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("Normal DataNode rollback", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      checkResult(DATA_NODE, dataNodeDirs);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     
      log("Normal BlockPool rollback", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "current",
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      // Create a previous snapshot for the blockpool
      UpgradeUtilities.createBlockPoolStorageDirs(dataNodeDirs, "previous",
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      // Put a newer ("future") layout version in current: layout versions are
      // negative and decrease over time, so current - 1 is not yet recognized.
      storageInfo = new StorageInfo(
          UpgradeUtilities.getCurrentLayoutVersion()-1,
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster),
          UpgradeUtilities.getCurrentFsscTime(cluster));

      // Overwrite VERSION file in the current directory of
      // volume directories and block pool slice directories
      // with a layout version from the future.
      File[] dataCurrentDirs = new File[dataNodeDirs.length];
      for (int i=0; i<dataNodeDirs.length; i++) {
        dataCurrentDirs[i] = new File((new Path(dataNodeDirs[i]
            + "/current")).toString());
      }
      UpgradeUtilities.createDataNodeVersionFile(
          dataCurrentDirs,
          storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));

      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      assertTrue(cluster.isDataNodeUp());

      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("NameNode rollback without existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      startNameNodeShouldFail(StartupOption.ROLLBACK,
          "None of the storage directories contain previous fs state");
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("DataNode rollback without existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.UPGRADE)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode rollback with future stored layout version in previous", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
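      // Integer.MIN_VALUE stands in for an arbitrarily "future" stored layout
      // version (layout versions are negative, so more negative means newer).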
      storageInfo = new StorageInfo(Integer.MIN_VALUE,
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster),
          UpgradeUtilities.getCurrentFsscTime(cluster));
     
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
     
      startBlockPoolShouldFail(StartupOption.ROLLBACK,
          cluster.getNamesystem().getBlockPoolId());
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     
      log("DataNode rollback with newer fsscTime in previous", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
                                                .format(false)
                                                .manageDataDfsDirs(false)
                                                .manageNameDfsDirs(false)
                                                .startupOption(StartupOption.ROLLBACK)
                                                .build();
     
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
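      // Long.MAX_VALUE gives the previous state a cTime newer than the
      // NameNode's, so the block pool rollback below is expected to fail.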
      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
            UpgradeUtilities.getCurrentNamespaceID(cluster),
            UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
     
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
     
      startBlockPoolShouldFail(StartupOption.ROLLBACK,
          cluster.getNamesystem().getBlockPoolId());
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("NameNode rollback with no edits file", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      deleteMatchingFiles(baseDirs, "edits.*");
      startNameNodeShouldFail(StartupOption.ROLLBACK,
          "Gap in transactions");
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode rollback with no image file", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      deleteMatchingFiles(baseDirs, "fsimage_.*");
      startNameNodeShouldFail(StartupOption.ROLLBACK,
          "No valid image files found");
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode rollback with corrupt version file", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      for (File f : baseDirs) {
        UpgradeUtilities.corruptFile(
            new File(f,"VERSION"),
            "layoutVersion".getBytes(Charsets.UTF_8),
            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
      }
      startNameNodeShouldFail(StartupOption.ROLLBACK,
          "file VERSION has layoutVersion missing");

      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode rollback with old layout version in previous", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      storageInfo = new StorageInfo(1,
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
     
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,


   * a number of valid and invalid conditions.
   */
  @Test
  public void testUpgrade() throws Exception {
    File[] baseDirs;
    StorageInfo storageInfo = null;
    for (int numDirs = 1; numDirs <= 2; numDirs++) {
      conf = new HdfsConfiguration();
      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
      String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
      String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
     
      log("Normal NameNode upgrade", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      checkNameNode(nameNodeDirs, EXPECTED_TXID);
      if (numDirs > 1)
        TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("Normal DataNode upgrade", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     
      log("NameNode upgrade with existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("DataNode upgrade with existing previous dir", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("DataNode upgrade with future stored layout version in current", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      storageInfo = new StorageInfo(Integer.MIN_VALUE,
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster),
          UpgradeUtilities.getCurrentFsscTime(cluster));
     
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
     
      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
          .getCurrentBlockPoolID(null));
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     
      log("DataNode upgrade with newer fsscTime in current", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      cluster = createCluster();
      baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
      storageInfo = new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
          UpgradeUtilities.getCurrentNamespaceID(cluster),
          UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE);
         
      UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
      // Ensure the corresponding block pool fails to initialize
      startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities
          .getCurrentBlockPoolID(null));
      cluster.shutdown();
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
      UpgradeUtilities.createEmptyDirs(dataNodeDirs);

      log("NameNode upgrade with no edits file", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      deleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode upgrade with no image file", numDirs);
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      deleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode upgrade with corrupt version file", numDirs);
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      for (File f : baseDirs) {
        UpgradeUtilities.corruptFile(
            new File(f,"VERSION"),
            "layoutVersion".getBytes(Charsets.UTF_8),
            "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
      }
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode upgrade with old layout version in current", numDirs);
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
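      // One step older (less negative) than the oldest layout version this
      // software can still upgrade from, so the UPGRADE startup should fail.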
      storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
     
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
          UpgradeUtilities.getCurrentBlockPoolID(cluster));
     
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     
      log("NameNode upgrade with future layout version in current", numDirs);
      baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
      storageInfo = new StorageInfo(Integer.MIN_VALUE,
          UpgradeUtilities.getCurrentNamespaceID(null),
          UpgradeUtilities.getCurrentClusterID(null),
          UpgradeUtilities.getCurrentFsscTime(null));
     
      UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,

   * Create a DatanodeRegistration for a specific block pool.
   * @param nsInfo the namespace info from the first part of the NN handshake
   */
  DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo)
      throws IOException {
    StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
    if (storageInfo == null) {
      // it's null in the case of SimulatedDataSet
      storageInfo = new StorageInfo(nsInfo);
    }

    DatanodeID dnId = new DatanodeID(
        streamingAddr.getAddress().getHostAddress(), hostName,
        storage.getDatanodeUuid(), getXferPort(), getInfoPort(),

        throw new IOException("Startup option not set.");
      }
      final String bpid = nsInfo.getBlockPoolID();
      //read storage info, lock data dirs and transition fs state if necessary
      storage.recoverTransitionRead(this, bpid, nsInfo, dataDirs, startOpt);
      final StorageInfo bpStorage = storage.getBPStorage(bpid);
      LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
          + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
          + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
    }

    // If this is a newly formatted DataNode then assign a new DatanodeUuid.

    return new DatanodeDescriptor(dnId, rackLocation);
  }
 
  public static DatanodeRegistration getLocalDatanodeRegistration() {
    return new DatanodeRegistration(getLocalDatanodeID(),
        new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
  }

   * <li>Check 5: In all other cases the startup fails.</li>
   * </ol>
   * </pre>
   */
  boolean isVersionCompatible(StorageData namenodeSd, StorageData datanodeSd) {
    final StorageInfo namenodeVer = namenodeSd.storageInfo;
    final StorageInfo datanodeVer = datanodeSd.storageInfo;
    // check #0
    if (namenodeVer.getNamespaceID() != datanodeVer.getNamespaceID()) {
      LOG.info("namespaceIDs are not equal: isVersionCompatible=false");
      return false;
    }
    // check #1
    if (!namenodeVer.getClusterID().equals(datanodeVer.getClusterID())) {
      LOG.info("clusterIDs are not equal: isVersionCompatible=false");
      return false;
    }
    // check #2
    if (!namenodeSd.blockPoolId.equals(datanodeSd.blockPoolId)) {
      LOG.info("blockPoolIDs are not equal: isVersionCompatible=false");
      return false;
    }
    // check #3
    int softwareLV = HdfsConstants.LAYOUT_VERSION;  // will also be Namenode's LV
    int storedLV = datanodeVer.getLayoutVersion();
    if (softwareLV == storedLV && 
        datanodeVer.getCTime() == namenodeVer.getCTime())
      {
        LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
        return true;
      }
    // check #4
    long absSoftwareLV = Math.abs((long)softwareLV);
    long absStoredLV = Math.abs((long)storedLV);
    if (absSoftwareLV > absStoredLV ||
        (softwareLV == storedLV &&
         datanodeVer.getCTime() < namenodeVer.getCTime()))
      {
        LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
        return true;
      }
    // check #5

    private final StorageInfo storageInfo;
    private final String blockPoolId;
   
    StorageData(int layoutVersion, int namespaceId, String clusterId,
        long cTime, String bpid) {
      storageInfo = new StorageInfo(layoutVersion, namespaceId, clusterId,
          cTime);
      blockPoolId = bpid;
    }
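
Putting the two preceding snippets together: a short sketch of how a pair of
StorageData instances drives isVersionCompatible from inside the same test class.
The IDs and the "+1" offset are illustrative values, not taken from the real test.

    // Illustrative only: NameNode and DataNode agree on namespaceID, clusterID
    // and blockPoolId, but the DataNode's stored layout version is one step
    // older (less negative) than the running software's.
    StorageData nnSd = new StorageData(HdfsConstants.LAYOUT_VERSION,
        12345, "CID-example", 100L, "BP-example");
    StorageData dnSd = new StorageData(HdfsConstants.LAYOUT_VERSION + 1,
        12345, "CID-example", 100L, "BP-example");

    // Checks #0-#2 pass (matching IDs), check #3 does not apply (layout
    // versions differ), and check #4 returns true because the software's
    // layout version is newer (larger in absolute value).
    assertTrue(isVersionCompatible(nnSd, dnSd));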

    assertEquals(NamenodeRole.NAMENODE,
        PBHelper.convert(NamenodeRoleProto.NAMENODE));
  }

  private static StorageInfo getStorageInfo() {
    return new StorageInfo(1, 2, "cid", 3);
  }

  @Test
  public void testConvertStoragInfo() {
    StorageInfo info = getStorageInfo();
    StorageInfoProto infoProto = PBHelper.convert(info);
    StorageInfo info2 = PBHelper.convert(infoProto);
    assertEquals(info.getClusterID(), info2.getClusterID());
    assertEquals(info.getCTime(), info2.getCTime());
    assertEquals(info.getLayoutVersion(), info2.getLayoutVersion());
    assertEquals(info.getNamespaceID(), info2.getNamespaceID());
  }

  @Test
  public void testConvertNamenodeRegistration() {
    StorageInfo info = getStorageInfo();
    NamenodeRegistration reg = new NamenodeRegistration("address:999",
        "http:1000", info, NamenodeRole.NAMENODE);
    NamenodeRegistrationProto regProto = PBHelper.convert(reg);
    NamenodeRegistration reg2 = PBHelper.convert(regProto);
    assertEquals(reg.getAddress(), reg2.getAddress());
