Package org.apache.hadoop.hdfs.server.namenode.snapshot

Examples of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot


       
        INode referred = getReferredINode().asReference().getReferredINode();
        if (referred instanceof FileWithSnapshot) {
          // if referred is a file, it must be a FileWithSnapshot since we did
          // recordModification before the rename
          FileWithSnapshot sfile = (FileWithSnapshot) referred;
          // make sure we mark the file as deleted
          sfile.deleteCurrentFile();
          if (snapshot != null) {
            try {
              // when calling cleanSubtree of the referred node, since we
              // compute quota usage updates before calling this destroy
              // function, we use true for countDiffChange
View Full Code Here


  private void computeContentSummary4Snapshot(final Content.Counts counts) {
    // file length and diskspace only counted for the latest state of the file
    // i.e. either the current state or the last snapshot
    if (this instanceof FileWithSnapshot) {
      final FileWithSnapshot withSnapshot = (FileWithSnapshot)this;
      final FileDiffList diffs = withSnapshot.getDiffs();
      final int n = diffs.asList().size();
      counts.add(Content.FILE, n);
      if (n > 0 && withSnapshot.isCurrentFileDeleted()) {
        counts.add(Content.LENGTH, diffs.getLast().getFileSize());
      }

      if (withSnapshot.isCurrentFileDeleted()) {
        final long lastFileSize = diffs.getLast().getFileSize();
        counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
      }
    }
  }
View Full Code Here

       
        INode referred = getReferredINode().asReference().getReferredINode();
        if (referred instanceof FileWithSnapshot) {
          // if referred is a file, it must be a FileWithSnapshot since we did
          // recordModification before the rename
          FileWithSnapshot sfile = (FileWithSnapshot) referred;
          // make sure we mark the file as deleted
          sfile.deleteCurrentFile();
          try {
            // when calling cleanSubtree of the referred node, since we
            // compute quota usage updates before calling this destroy
            // function, we use true for countDiffChange
            referred.cleanSubtree(snapshot, prior, collectedBlocks,
View Full Code Here

  private void computeContentSummary4Snapshot(final Content.Counts counts) {
    // file length and diskspace only counted for the latest state of the file
    // i.e. either the current state or the last snapshot
    if (this instanceof FileWithSnapshot) {
      final FileWithSnapshot withSnapshot = (FileWithSnapshot)this;
      final FileDiffList diffs = withSnapshot.getDiffs();
      final int n = diffs.asList().size();
      counts.add(Content.FILE, n);
      if (n > 0 && withSnapshot.isCurrentFileDeleted()) {
        counts.add(Content.LENGTH, diffs.getLast().getFileSize());
      }

      if (withSnapshot.isCurrentFileDeleted()) {
        final long lastFileSize = diffs.getLast().getFileSize();
        counts.add(Content.DISKSPACE, lastFileSize * getBlockReplication());
      }
    }
  }
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware@gmail.com.