org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot

Examples of org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff


    // Number of serialized diffs; -1 marks a file with no diff list.
    final int size = in.readInt();
    if (size == -1) {
      return null;
    } else {
      final FileDiffList diffs = new FileDiffList();
      FileDiff posterior = null;
      for (int i = 0; i < size; i++) {
        // Each diff is loaded with the previously read diff passed as its posterior.
        final FileDiff d = loadFileDiff(posterior, in, loader);
        diffs.addFirst(d);
        posterior = d;
      }
      return diffs;
    }
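The loop above rebuilds the diff list with addFirst while handing the previously read diff to the next one as its posterior, which suggests the diffs are written to the image newest-first. A minimal standalone sketch of that reconstruction pattern, using plain java.util collections rather than Hadoop classes (the newest-first ordering is an inference from the snippet, not something it states):

import java.util.ArrayDeque;
import java.util.Deque;

public class AddFirstOrderSketch {
  public static void main(String[] args) {
    // Assumed on-stream order: newest diff first (d3, d2, d1).
    String[] onStream = { "d3", "d2", "d1" };
    Deque<String> diffs = new ArrayDeque<>();
    for (String d : onStream) {
      diffs.addFirst(d);          // mirrors diffs.addFirst(d) in the snippet above
    }
    System.out.println(diffs);    // [d1, d2, d3]: oldest-first, chronological order
  }
}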


   
    // 3. Load snapshotINode: a boolean flag records whether a snapshot copy of
    // the file's attributes was saved; load it only when the flag is set.
    final INodeFileAttributes snapshotINode = in.readBoolean()
        ? loader.loadINodeFileAttributes(in) : null;

    return new FileDiff(snapshot, snapshotINode, posterior, fileSize);
  }
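The readBoolean() call above is a presence flag: the optional snapshot copy of the INode attributes is only read when the flag is true. A self-contained round-trip sketch of that optional-field pattern, written against plain java.io rather than Hadoop's actual image format:

import java.io.*;

public class OptionalFieldSketch {
  // Write a boolean presence flag, then the payload only when it is non-null.
  static void write(DataOutput out, String value) throws IOException {
    out.writeBoolean(value != null);
    if (value != null) {
      out.writeUTF(value);
    }
  }

  // Mirror of "in.readBoolean() ? loader.loadINodeFileAttributes(in) : null".
  static String read(DataInput in) throws IOException {
    return in.readBoolean() ? in.readUTF() : null;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    write(new DataOutputStream(bytes), "snapshot-attributes");
    String back = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(back);   // prints snapshot-attributes
  }
}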

        .getINode4Write(bar.toString());
    assertSame(barNode, children.get(0));
    assertSame(fooNode, barNode.getParent());
    // bar should carry exactly one FileDiff, recorded for snapshot snap1.
    List<FileDiff> barDiffList = barNode.getDiffs().asList();
    assertEquals(1, barDiffList.size());
    FileDiff barDiff = barDiffList.get(0);
    assertEquals(snap1, Snapshot.getSnapshotName(barDiff.snapshot));

    // restart cluster multiple times to make sure the fsimage and edits log are
    // correct. Note that when loading fsimage, foo and bar will be converted
    // back to normal INodeDirectory and INodeFile since they do not store any

  /**
   * Compute file size of the current file if the given snapshot is null;
   * otherwise, get the file size from the given snapshot.
   */
  public final long computeFileSize(Snapshot snapshot) {
    if (snapshot != null && this instanceof FileWithSnapshot) {
      // Use the size recorded in the FileDiff for this snapshot, if one exists.
      final FileDiff d = ((FileWithSnapshot) this).getDiffs().getDiff(snapshot);
      if (d != null) {
        return d.getFileSize();
      }
    }
    // No snapshot given, or no size recorded for it: use the current file size.
    return computeFileSize(true, false);
  }
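The method above prefers the length recorded in the snapshot's FileDiff and only falls back to the live file length when no diff captured one. A standalone sketch of that lookup order, with a Map standing in for the diff list (illustrative plain Java, not Hadoop code):

import java.util.HashMap;
import java.util.Map;

public class SnapshotSizeSketch {
  private final Map<String, Long> sizeBySnapshot = new HashMap<>(); // snapshot -> recorded size
  private long currentSize;

  long computeFileSize(String snapshot) {
    if (snapshot != null) {
      Long recorded = sizeBySnapshot.get(snapshot);   // analogue of getDiffs().getDiff(snapshot)
      if (recorded != null) {
        return recorded;                              // analogue of d.getFileSize()
      }
    }
    return currentSize;                               // analogue of computeFileSize(true, false)
  }

  public static void main(String[] args) {
    SnapshotSizeSketch f = new SnapshotSizeSketch();
    f.currentSize = 2048;
    f.sizeBySnapshot.put("s1", 1024L);
    System.out.println(f.computeFileSize("s1"));      // 1024: size frozen in the snapshot
    System.out.println(f.computeFileSize(null));      // 2048: current size
  }
}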
