Package org.apache.hadoop.hdfs.server.namenode.startupprogress

Examples of org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress

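StartupProgress tracks the NameNode's startup phases (LOADING_FSIMAGE, LOADING_EDITS, SAVING_CHECKPOINT, SAFEMODE). Each phase can contain steps, and each step carries a total and a Counter. The examples below all follow the same pattern: obtain the singleton via NameNode.getStartupProgress(), begin a phase or step, set its total, increment the step's Counter as work proceeds, then end the step and phase. A minimal sketch of that pattern, assuming the surrounding NameNode context is available; totalInodes and loadNextInode() are illustrative placeholders, not HDFS methods:

  // Sketch only: drives one step of the LOADING_FSIMAGE phase.
  private void loadInodesWithProgress(long totalInodes) {
    StartupProgress prog = NameNode.getStartupProgress();
    prog.beginPhase(Phase.LOADING_FSIMAGE);
    Step step = new Step(StepType.INODES);
    prog.beginStep(Phase.LOADING_FSIMAGE, step);
    prog.setTotal(Phase.LOADING_FSIMAGE, step, totalInodes);
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    for (long i = 0; i < totalInodes; i++) {
      loadNextInode();       // placeholder for the real per-inode work
      counter.increment();   // makes progress visible to observers
    }
    prog.endStep(Phase.LOADING_FSIMAGE, step);
    prog.endPhase(Phase.LOADING_FSIMAGE);
  }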

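Marking a phase done without doing its work (FSNamesystem, loading the image at startup): when no namespace save is needed, SAVING_CHECKPOINT is begun and immediately ended so the phase still registers as complete: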
      boolean needToSave =
        fsImage.recoverTransitionRead(startOpt, this, recovery) && !haEnabled;
      if (needToSave) {
        fsImage.saveNamespace(this);
      } else {
        // No need to save, so mark the phase done.
        StartupProgress prog = NameNode.getStartupProgress();
        prog.beginPhase(Phase.SAVING_CHECKPOINT);
        prog.endPhase(Phase.SAVING_CHECKPOINT);
      }
      // This will start a new log segment and write to the seen_txid file, so
      // we shouldn't do it when coming up in standby state
      if (!haEnabled) {
        fsImage.openEditLogForWrite();

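Beginning the SAFEMODE phase when common services start: the total for STEP_AWAITING_REPORTED_BLOCKS is the number of complete blocks expected to be reported by datanodes: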

    try {
      nnResourceChecker = new NameNodeResourceChecker(conf);
      checkAvailableResources();
      assert safeMode != null &&
        !safeMode.isPopulatingReplQueues();
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAFEMODE);
      prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS,
        getCompleteBlocksTotal());
      setBlockTotal();
      blockManager.activate(conf);
    } finally {
      writeUnlock();

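Leaving safe mode: the step and phase are ended only if startup has not already completed, because safe mode can also be entered and left long after startup: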
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
          + blockManager.numOfUnderReplicatedBlocks() + " blocks");

      startSecretManagerIfNecessary();

      // If startup has not yet completed, end safemode phase.
      StartupProgress prog = NameNode.getStartupProgress();
      if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
        prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
        prog.endPhase(Phase.SAFEMODE);
      }
    }

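Counting safe blocks as block reports arrive. Note that the Counter is looked up once and cached in a field, keeping the per-block hot path to a single increment: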
    private synchronized void incrementSafeBlockCount(short replication) {
      if (replication == safeReplication) {
        this.blockSafe++;

        // Report startup progress only if we haven't completed startup yet.
        StartupProgress prog = NameNode.getStartupProgress();
        if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
          if (this.awaitingReportedBlocksCounter == null) {
            this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE,
              STEP_AWAITING_REPORTED_BLOCKS);
          }
          this.awaitingReportedBlocksCounter.increment();
        }

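Wrapping edit-log replay in a LOADING_EDITS step; endStep sits in the finally block so the step is closed out even if replay fails: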
  /**
   * Load an edit log, and apply the changes to the in-memory structure.
   * This is where we apply edits that we've been writing to disk all
   * along.
   */
  long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
      MetaRecoveryContext recovery) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = createStartupProgressStep(edits);
    prog.beginStep(Phase.LOADING_EDITS, step);
    fsNamesys.writeLock();
    try {
      long startTime = now();
      FSImage.LOG.info("Start loading edits file " + edits.getName());
      long numEdits = loadEditRecords(edits, false,
                                 expectedStartingTxId, recovery);
      FSImage.LOG.info("Edits file " + edits.getName()
          + " of size " + edits.length() + " edits # " + numEdits
          + " loaded in " + (now()-startTime)/1000 + " seconds");
      return numEdits;
    } finally {
      edits.close();
      fsNamesys.writeUnlock();
      prog.endStep(Phase.LOADING_EDITS, step);
    }
  }

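Sizing the LOADING_EDITS step: the total is derived from the transaction ID range of the input stream, and the Counter is incremented as each transaction is applied: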
   
    long expectedTxId = expectedStartingTxId;
    long numEdits = 0;
    long lastTxId = in.getLastTxId();
    long numTxns = (lastTxId - expectedStartingTxId) + 1;
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = createStartupProgressStep(in);
    prog.setTotal(Phase.LOADING_EDITS, step, numTxns);
    Counter counter = prog.getCounter(Phase.LOADING_EDITS, step);
    long lastLogTime = now();
    long lastInodeId = fsNamesys.getLastInodeId();
   
    try {
      while (true) {

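Loading an fsimage file as an INODES step of LOADING_FSIMAGE. The total is the number of files recorded in the image; after endStep, setCount forces the count up to the total to correct for reference inodes that are not counted individually: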
    void load(File curFile) throws IOException {
      checkNotLoaded();
      assert curFile != null : "curFile is null";

      StartupProgress prog = NameNode.getStartupProgress();
      Step step = new Step(StepType.INODES);
      prog.beginStep(Phase.LOADING_FSIMAGE, step);
      long startTime = now();

      //
      // Load in bits
      //
      MessageDigest digester = MD5Hash.getDigester();
      DigestInputStream fin = new DigestInputStream(
           new FileInputStream(curFile), digester);

      DataInputStream in = new DataInputStream(fin);
      try {
        // read image version: first appeared in version -1
        int imgVersion = in.readInt();
        if (getLayoutVersion() != imgVersion) {
          throw new InconsistentFSStateException(curFile,
              "imgVersion " + imgVersion +
              " expected to be " + getLayoutVersion());
        }
        boolean supportSnapshot = LayoutVersion.supports(Feature.SNAPSHOT,
            imgVersion);
        if (LayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imgVersion)) {
          LayoutFlags.read(in);
        }

        // read namespaceID: first appeared in version -2
        in.readInt();

        long numFiles = in.readLong();

        // read in the last generation stamp for legacy blocks.
        long genstamp = in.readLong();
        namesystem.setGenerationStampV1(genstamp);
       
        if (LayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
          // read the starting generation stamp for sequential block IDs
          genstamp = in.readLong();
          namesystem.setGenerationStampV2(genstamp);

          // read the last generation stamp for blocks created after
          // the switch to sequential block IDs.
          long stampAtIdSwitch = in.readLong();
          namesystem.setGenerationStampV1Limit(stampAtIdSwitch);

          // read the max sequential block ID.
          long maxSequentialBlockId = in.readLong();
          namesystem.setLastAllocatedBlockId(maxSequentialBlockId);
        } else {
          long startingGenStamp = namesystem.upgradeGenerationStampToV2();
          // This is an upgrade.
          LOG.info("Upgrading to sequential block IDs. Generation stamp " +
                   "for new blocks set to " + startingGenStamp);
        }

        // read the transaction ID of the last edit represented by
        // this image
        if (LayoutVersion.supports(Feature.STORED_TXIDS, imgVersion)) {
          imgTxId = in.readLong();
        } else {
          imgTxId = 0;
        }

        // read the last allocated inode id in the fsimage
        if (LayoutVersion.supports(Feature.ADD_INODE_ID, imgVersion)) {
          long lastInodeId = in.readLong();
          namesystem.resetLastInodeId(lastInodeId);
          if (LOG.isDebugEnabled()) {
            LOG.debug("load last allocated InodeId from fsimage:" + lastInodeId);
          }
        } else {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Old layout version doesn't have inode id."
                + " Will assign new id for each inode.");
          }
        }
       
        if (supportSnapshot) {
          snapshotMap = namesystem.getSnapshotManager().read(in, this);
        }

        // read compression related info
        FSImageCompression compression;
        if (LayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imgVersion)) {
          compression = FSImageCompression.readCompressionHeader(conf, in);
        } else {
          compression = FSImageCompression.createNoopCompression();
        }
        in = compression.unwrapInputStream(fin);

        LOG.info("Loading image file " + curFile + " using " + compression);
       
        // load all inodes
        LOG.info("Number of files = " + numFiles);
        prog.setTotal(Phase.LOADING_FSIMAGE, step, numFiles);
        Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
        if (LayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION,
            imgVersion)) {
          if (supportSnapshot) {
            loadLocalNameINodesWithSnapshot(numFiles, in, counter);
          } else {
            loadLocalNameINodes(numFiles, in, counter);
          }
        } else {
          loadFullNameINodes(numFiles, in, counter);
        }

        loadFilesUnderConstruction(in, supportSnapshot, counter);
        prog.endStep(Phase.LOADING_FSIMAGE, step);
        // Now that the step is finished, set counter equal to total to adjust
        // for possible under-counting due to reference inodes.
        prog.setCount(Phase.LOADING_FSIMAGE, step, numFiles);

        loadSecretManagerState(in);

        loadCacheManagerState(in);

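Saving a checkpoint: an INODES step scoped to the storage directory path (sdPath), with the total taken from the size of the directory tree. As on the load side, setCount runs after endStep to adjust for reference inodes: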
      final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
      FSDirectory fsDir = sourceNamesystem.dir;
      String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
      Step step = new Step(StepType.INODES, sdPath);
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
      prog.setTotal(Phase.SAVING_CHECKPOINT, step,
        fsDir.rootDir.numItemsInTree());
      Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
      long startTime = now();
      //
      // Write out data
      //
      MessageDigest digester = MD5Hash.getDigester();
      FileOutputStream fout = new FileOutputStream(newFile);
      DigestOutputStream fos = new DigestOutputStream(fout, digester);
      DataOutputStream out = new DataOutputStream(fos);
      try {
        out.writeInt(HdfsConstants.LAYOUT_VERSION);
        LayoutFlags.write(out);
        // We use the non-locked version of getNamespaceInfo here since
        // the coordinating thread of saveNamespace already has read-locked
        // the namespace for us. If we attempt to take another readlock
        // from the actual saver thread, there's a potential of a
        // fairness-related deadlock. See the comments on HDFS-2223.
        out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
            .getNamespaceID());
        out.writeLong(fsDir.rootDir.numItemsInTree());
        out.writeLong(sourceNamesystem.getGenerationStampV1());
        out.writeLong(sourceNamesystem.getGenerationStampV2());
        out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
        out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
        out.writeLong(context.getTxId());
        out.writeLong(sourceNamesystem.getLastInodeId());

       
        sourceNamesystem.getSnapshotManager().write(out);
       
        // write compression info and set up compressed stream
        out = compression.writeHeaderAndWrapStream(fos);
        LOG.info("Saving image file " + newFile +
                 " using " + compression);

        // save the root
        saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
        // save the rest of the nodes
        saveImage(fsDir.rootDir, out, true, false, counter);
        prog.endStep(Phase.SAVING_CHECKPOINT, step);
        // Now that the step is finished, set counter equal to total to adjust
        // for possible under-counting due to reference inodes.
        prog.setCount(Phase.SAVING_CHECKPOINT, step,
          fsDir.rootDir.numItemsInTree());
        // save files under construction
        // TODO: for HDFS-5428, since we cannot break the compatibility of
        // fsimage, we store part of the under-construction files that are only
        // in snapshots in this "under-construction-file" section. As a

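Saving cache pools as a CACHE_POOLS step of SAVING_CHECKPOINT, incrementing once per pool: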
  /**
   * Save cache pools to fsimage
   */
  private void savePools(DataOutputStream out,
      String sdPath) throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.CACHE_POOLS, sdPath);
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(cachePools.size());
    for (CachePool pool: cachePools.values()) {
      FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
      counter.increment();
    }
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
  }

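Saving cache directives as a CACHE_ENTRIES step, incrementing once per directive: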
  /**
   * Save cache entries to fsimage
   */
  private void saveDirectives(DataOutputStream out, String sdPath)
      throws IOException {
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
    prog.beginStep(Phase.SAVING_CHECKPOINT, step);
    prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
    Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
    out.writeInt(directivesById.size());
    for (CacheDirective directive : directivesById.values()) {
      FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
      counter.increment();
    }
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
  }

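The examples above all report progress. To read it, StartupProgress.createView() returns an immutable StartupProgressView snapshot, which backs the NameNode web UI's startup progress page. A brief sketch, assuming the NameNode is reachable in-process:

  StartupProgressView view = NameNode.getStartupProgress().createView();
  for (Phase phase : view.getPhases()) {
    System.out.println(phase + ": " + view.getStatus(phase) + " ("
        + (int) (100 * view.getPercentComplete(phase)) + "% complete)");
  }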

