Examples of FSImage


Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

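    // Likely from a BootstrapStandby-style flow: format fresh NameNode storage,
    // confirm the edit log is readable and has enough transactions, then
    // download the active NameNode's latest checkpoint image into local storage.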
    NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
    storage.format(nsInfo);

    // Load the newly formatted image, using all of the directories (including shared
    // edits)
    FSImage image = new FSImage(conf);
    assert image.getEditLog().isOpenForRead() :
        "Expected edit log to be open for read";
   
    // Ensure that we have enough edits already in the shared directory to
    // start up from the last checkpoint on the active.
    if (!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
      return ERR_CODE_LOGS_UNAVAILABLE;
    }
   
    image.getStorage().writeTransactionIdFileToStorage(curTxId);

    // Download that checkpoint into our storage directories.
    MD5Hash hash = TransferFsImage.downloadImageToStorage(
        otherHttpAddr.toString(), imageTxId,
        storage, true);
    image.saveDigestAndRenameCheckpointImage(imageTxId, hash);
    return 0;
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

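    // Edit log tailing on a standby NameNode: under the namesystem write lock,
    // select edit streams newer than the last applied txid and replay them into
    // the in-memory namespace via FSImage.loadEdits().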
    // transitionToActive RPC takes the write lock before calling
    // tailer.stop() -- so if we're not interruptible, it will
    // deadlock.
    namesystem.writeLockInterruptibly();
    try {
      FSImage image = namesystem.getFSImage();

      long lastTxnId = image.getLastAppliedTxId();
     
      if (LOG.isDebugEnabled()) {
        LOG.debug("lastTxnId: " + lastTxnId);
      }
      Collection<EditLogInputStream> streams;
      try {
        streams = editLog.selectInputStreams(lastTxnId + 1, 0, null, false);
      } catch (IOException ioe) {
        // This is acceptable: it can happen if we try to tail edits in the
        // middle of an edit log roll, i.e. the last segment has been finalized
        // but the new in-progress edits file hasn't been started yet.
        LOG.warn("Edits tailer failed to find any streams. Will try again " +
            "later.", ioe);
        return;
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("edit streams to load from: " + streams.size());
      }
     
      // Once we have streams to load, errors encountered are legitimate cause
      // for concern, so we don't catch them here. Simple errors reading from
      // disk are ignored.
      long editsLoaded = 0;
      try {
        editsLoaded = image.loadEdits(streams, namesystem, null);
      } catch (EditLogInputException elie) {
        editsLoaded = elie.getNumEditsLoaded();
        throw elie;
      } finally {
        if (editsLoaded > 0 || LOG.isDebugEnabled()) {
          LOG.info(String.format("Loaded %d edits starting from txid %d ",
              editsLoaded, lastTxnId));
        }
      }

      if (editsLoaded > 0) {
        lastLoadTimestamp = now();
      }
      lastLoadedTxnId = image.getLastAppliedTxId();
    } finally {
      namesystem.writeUnlock();
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

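    // Test excerpt: with the checkpoint period set to 0, restart the NameNode,
    // spy on its FSImage, and verify that no checkpoint is ever saved at txid=0.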
    cluster.getConfiguration(1).setInt(
        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
    cluster.restartNameNode(1);
    nn1 = cluster.getNameNode(1);
    FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
   
    // We shouldn't save any checkpoints at txid=0
    Thread.sleep(1000);
    Mockito.verify(spyImage1, Mockito.never())
      .saveNamespace((FSNamesystem) Mockito.anyObject());

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

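  // Test excerpt: Mockito wraps FSImage.saveNamespace() in a DelayAnswer so the
  // test can pause the standby checkpoint and observe behavior mid-checkpoint.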
  @Test(timeout=300000)
  public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
   
    // Set it up so that we know when the SBN checkpoint starts and ends.
    FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
    DelayAnswer answerer = new DelayAnswer(LOG);
    Mockito.doAnswer(answerer).when(spyImage1)
        .saveNamespace(Mockito.any(FSNamesystem.class),
            Mockito.eq(NameNodeFile.IMAGE), Mockito.any(Canceler.class));


Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

 
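  // Test excerpt: the same DelayAnswer setup, here used to hold a checkpoint
  // open while verifying that reads are still served by the standby.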
  @Test(timeout=300000)
  public void testReadsAllowedDuringCheckpoint() throws Exception {
   
    // Set it up so that we know when the SBN checkpoint starts and ends.
    FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
    DelayAnswer answerer = new DelayAnswer(LOG);
    Mockito.doAnswer(answerer).when(spyImage1)
        .saveNamespace(Mockito.any(FSNamesystem.class),
            Mockito.any(NameNodeFile.class),
            Mockito.any(Canceler.class));

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

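    // A later variant of the bootstrap flow: attach the formatted storage to the
    // FSImage, initialize the edit log explicitly, then download and rename the
    // checkpoint image, closing the FSImage if an IOException occurs.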
    // Format the storage (writes VERSION file)
    storage.format(nsInfo);

    // Load the newly formatted image, using all of the directories (including shared
    // edits)
    FSImage image = new FSImage(conf);
    try {
      image.getStorage().setStorageInfo(storage);
      image.initEditLog(StartupOption.REGULAR);
      assert image.getEditLog().isOpenForRead() :
        "Expected edit log to be open for read";

      // Ensure that we have enough edits already in the shared directory to
      // start up from the last checkpoint on the active.
      if (!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
        return ERR_CODE_LOGS_UNAVAILABLE;
      }

      image.getStorage().writeTransactionIdFileToStorage(curTxId);

      // Download that checkpoint into our storage directories.
      MD5Hash hash = TransferFsImage.downloadImageToStorage(
        otherHttpAddr, imageTxId, storage, true);
      image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
          hash);
    } catch (IOException ioe) {
      image.close();
      throw ioe;
    }
    return 0;
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

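    // Standby checkpointing: skip if no transactions have arrived since the last
    // checkpoint, choose IMAGE_ROLLBACK vs. IMAGE based on rolling-upgrade state,
    // save the namespace, and optionally write a legacy OIV image.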
    try {
      assert namesystem.getEditLog().isOpenForRead() :
        "Standby Checkpointer should only attempt a checkpoint when " +
        "NN is in standby mode, but the edit logs are in an unexpected state";
     
      FSImage img = namesystem.getFSImage();
     
      long prevCheckpointTxId = img.getStorage().getMostRecentCheckpointTxId();
      long thisCheckpointTxId = img.getLastAppliedOrWrittenTxId();
      assert thisCheckpointTxId >= prevCheckpointTxId;
      if (thisCheckpointTxId == prevCheckpointTxId) {
        LOG.info("A checkpoint was triggered but the Standby Node has not " +
            "received any transactions since the last checkpoint at txid " +
            thisCheckpointTxId + ". Skipping...");
        return;
      }

      if (namesystem.isRollingUpgrade()
          && !namesystem.getFSImage().hasRollbackFSImage()) {
        // if we will do rolling upgrade but have not created the rollback image
        // yet, name this checkpoint as fsimage_rollback
        imageType = NameNodeFile.IMAGE_ROLLBACK;
      } else {
        imageType = NameNodeFile.IMAGE;
      }
      img.saveNamespace(namesystem, imageType, canceler);
      txid = img.getStorage().getMostRecentCheckpointTxId();
      assert txid == thisCheckpointTxId : "expected to save checkpoint at txid=" +
        thisCheckpointTxId + " but instead saved at txid=" + txid;

      // Save the legacy OIV image, if the output dir is defined.
      String outputDir = checkpointConf.getLegacyOivImageDir();
      if (outputDir != null && !outputDir.isEmpty()) {
        img.saveLegacyOIVImage(namesystem, outputDir, canceler);
      }
    } finally {
      namesystem.longReadUnlock();
    }
   

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

  static int getCanceledCount() {
    return canceledCount;
  }

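  // The uncheckpointed-transaction count is the gap between the last applied or
  // written txid and the txid of the most recent checkpoint on disk.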
  private long countUncheckpointedTxns() {
    FSImage img = namesystem.getFSImage();
    return img.getLastAppliedOrWrittenTxId() -
      img.getStorage().getMostRecentCheckpointTxId();
  }
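
A minimal sketch (not part of the excerpt above) of how that count could feed a checkpoint trigger; the threshold and period parameters are hypothetical stand-ins for the real checkpoint configuration:

  // Hypothetical helper: checkpoint once enough transactions have accumulated or
  // enough time has passed. Relies on countUncheckpointedTxns() shown above.
  private boolean shouldCheckpoint(long txnThreshold, long periodMs,
      long lastCheckpointTimeMs) {
    long uncheckpointed = countUncheckpointedTxns();
    long msSinceLast = System.currentTimeMillis() - lastCheckpointTimeMs;
    return uncheckpointed >= txnThreshold || msSinceLast >= periodMs;
  }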

Examples of org.apache.hadoop.hdfs.server.namenode.FSImage

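      // Rolling-upgrade test: after PREPARE the NameNode should hold a rollback
      // fsimage (hasRollbackFSImage() == true); after FINALIZE it should not, and
      // the NameNode should restart cleanly on edits containing the FINALIZE op.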
      dfsCluster.transitionToActive(0);
      DistributedFileSystem dfs = dfsCluster.getFileSystem(0);
      dfs.mkdirs(foo);

      FSImage fsimage = dfsCluster.getNamesystem(0).getFSImage();

      // start rolling upgrade
      RollingUpgradeInfo info = dfs
          .rollingUpgrade(RollingUpgradeAction.PREPARE);
      Assert.assertTrue(info.isStarted());
      dfs.mkdirs(bar);

      queryForPreparation(dfs);

      // The NN should have a copy of the fsimage in case of rollbacks.
      Assert.assertTrue(fsimage.hasRollbackFSImage());

      info = dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
      Assert.assertTrue(info.isFinalized());
      Assert.assertTrue(dfs.exists(foo));

      // Once finalized, there should be no more fsimage for rollbacks.
      Assert.assertFalse(fsimage.hasRollbackFSImage());

      // Should have no problem in restart and replaying edits that include
      // the FINALIZE op.
      dfsCluster.restartNameNode(0);
    } finally {
