Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeDirectory
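The snippets below show INodeDirectory at work inside the NameNode: rebuilding snapshot state while loading an fsimage, walking directory diffs when snapshots are deleted or renamed, switching directories between snapshottable and plain, and generating directories for an edit-log benchmark. The recurring pattern is narrowing a generic INode to a directory before doing directory-specific work; here is a minimal sketch of that pattern using only accessors that appear in the snippets (the class and method names in the sketch are made up for illustration):

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

class INodeKindExample {
  // Narrow a generic INode to its concrete kind before acting on it.
  static String describe(INode inode) {
    if (inode.isFile()) {
      return "file #" + inode.asFile().getId();
    } else if (inode.isDirectory()) {
      INodeDirectory dir = inode.asDirectory();
      return "directory #" + dir.getId() + ", snapshottable=" + dir.isSnapshottable();
    }
    return "other inode (e.g. a symlink)";
  }
}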


      // Restore snapshot state from the image's SnapshotSection: global counters
      // first, then re-wrap every snapshottable directory inode listed there.
      SnapshotSection section = SnapshotSection.parseDelimitedFrom(in);
      int snum = section.getNumSnapshots();
      sm.setNumSnapshots(snum);
      sm.setSnapshotCounter(section.getSnapshotCounter());
      for (long sdirId : section.getSnapshottableDirList()) {
        INodeDirectory dir = fsDir.getInode(sdirId).asDirectory();
        final INodeDirectorySnapshottable sdir;
        if (!dir.isSnapshottable()) {
          // Replace the plain directory with a snapshottable wrapper and
          // register the new inode in the inode map.
          sdir = new INodeDirectorySnapshottable(dir);
          fsDir.addToInodeMap(sdir);
        } else {
          // dir is root, and admin set root to snapshottable before
          sdir = (INodeDirectorySnapshottable) dir;
          // ...


    // Read 'size' snapshot entries from the stream and attach each one to its
    // snapshottable parent directory.
    private void loadSnapshots(InputStream in, int size) throws IOException {
      for (int i = 0; i < size; i++) {
        SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
            .parseDelimitedFrom(in);
        // Rebuild the snapshot root directory from its serialized form.
        INodeDirectory root = loadINodeDirectory(pbs.getRoot(),
            parent.getLoaderContext());
        int sid = pbs.getSnapshotId();
        // The snapshot root shares its inode id with the snapshottable directory,
        // so the inode map resolves it to the parent.
        INodeDirectorySnapshottable parent = (INodeDirectorySnapshottable) fsDir
            .getInode(root.getId()).asDirectory();
        Snapshot snapshot = new Snapshot(sid, root, parent);
        // add the snapshot to parent, since we follow the sequence of
        // snapshotsByNames when saving, we do not need to sort when loading
        parent.addSnapshot(snapshot);
        snapshotMap.put(sid, snapshot);
        // ...

      }
    } else if (inode.isFile()) {
      // Files clean their own snapshot diffs directly.
      inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true);
    } else if (inode.isDirectory()) {
      Map<INode, INode> excludedNodes = null;
      INodeDirectory dir = inode.asDirectory();
      // Directories only carry snapshot diffs when the feature is attached.
      DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
      if (sf != null) {
        DirectoryDiffList diffList = sf.getDiffs();
        DirectoryDiff priorDiff = diffList.getDiffById(prior);
        // Only proceed when a diff was recorded exactly at prior.
        if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
          List<INode> dList = priorDiff.diff.getList(ListType.DELETED);
          // ...

      } else if (topNode.isFile() && topNode.asFile().isWithSnapshot()) {
        // A file with snapshot data: remove the diff recorded between prior and post.
        INodeFile file = topNode.asFile();
        counts.add(file.getDiffs().deleteSnapshotDiff(post, prior, file,
            collectedBlocks, removedINodes, countDiffChange));
      } else if (topNode.isDirectory()) {
        INodeDirectory dir = topNode.asDirectory();
        ChildrenDiff priorChildrenDiff = null;
        DirectoryWithSnapshotFeature sf = dir.getDirectoryWithSnapshotFeature();
        if (sf != null) {
          // delete files/dirs created after prior. Note that these
          // files/dirs, along with inode, were deleted right after post.
          DirectoryDiff priorDiff = sf.getDiffs().getDiffById(prior);
          if (priorDiff != null && priorDiff.getSnapshotId() == prior) {
            priorChildrenDiff = priorDiff.getChildrenDiff();
            counts.add(priorChildrenDiff.destroyCreatedList(dir,
                collectedBlocks, removedINodes));
          }
        }

        // Walk the children visible in prior, skipping any child already
        // recorded as deleted in prior's diff.
        for (INode child : dir.getChildrenList(prior)) {
          if (priorChildrenDiff != null
              && priorChildrenDiff.search(ListType.DELETED,
                  child.getLocalNameBytes()) != null) {
            continue;
          }
          // ...

   * If the path is already a snapshottable directory, update the quota.
   */
  public void setSnapshottable(final String path, boolean checkNestedSnapshottable)
      throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath4Write(path);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (checkNestedSnapshottable) {
      // Reject nesting of snapshottable directories when the check is enabled.
      checkNestedSnapshottable(d, path);
    }

    final INodeDirectorySnapshottable s;
    if (d.isSnapshottable()) {
      // The directory is already a snapshottable directory: just update its quota.
      s = (INodeDirectorySnapshottable) d;
      s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
    } else {
      // Replace the plain directory inode with a snapshottable one in the inode map.
      s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshotId(),
          fsdir.getINodeMap());
    }
    addSnapshottable(s);
  }
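setSnapshottable and its counterpart resetSnapshottable (next snippet) are the NameNode-side ends of the hdfs dfsadmin -allowSnapshot / -disallowSnapshot operations. A hedged sketch of driving the pair from inside the same manager class, assuming only the two methods shown on this page (the helper name toggleSnapshots is made up):

  // Hypothetical helper: allow or disallow snapshots on a path.
  void toggleSnapshots(String path, boolean allow) throws IOException {
    if (allow) {
      // With the nesting check enabled, this may throw if an ancestor or
      // descendant directory is already snapshottable.
      setSnapshottable(path, true);
    } else {
      // Throws SnapshotException while the directory still holds snapshots.
      resetSnapshottable(path);
    }
  }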

   *
   * @throws SnapshotException if there are snapshots in the directory.
   */
  public void resetSnapshottable(final String path) throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath4Write(path);
    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
    if (!d.isSnapshottable()) {
      // the directory is already non-snapshottable
      return;
    }
    final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
    // A directory that still has snapshots cannot be reset.
    if (s.getNumSnapshots() > 0) {
      // ...

  /** Limit on the number of snapshots per snapshottable directory: 1 << 16 = 65536. */
  static final int SNAPSHOT_LIMIT = 1 << 16;

  /** Cast INode to INodeDirectorySnapshottable. */
  static public INodeDirectorySnapshottable valueOf(
      INode inode, String src) throws IOException {
    final INodeDirectory dir = INodeDirectory.valueOf(inode, src);
    if (!dir.isSnapshottable()) {
      throw new SnapshotException(
          "Directory is not a snapshottable directory: " + src);
    }
    return (INodeDirectorySnapshottable) dir;
  }
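The valueOf guard above is a typical first step for per-snapshot operations on a path: resolve the path to its inode, then fail with SnapshotException unless the directory has been made snapshottable. A minimal sketch combining it with the path lookup used in the setSnapshottable example (the helper name resolveSnapshotRoot is made up; fsdir is the same FSDirectory as in the earlier snippets):

  // Hypothetical helper: resolve a path and insist it is a snapshot root.
  static INodeDirectorySnapshottable resolveSnapshotRoot(FSDirectory fsdir, String path)
      throws IOException {
    final INodesInPath iip = fsdir.getINodesInPath4Write(path);
    // Throws SnapshotException if the directory was never made snapshottable.
    return INodeDirectorySnapshottable.valueOf(iip.getLastINode(), path);
  }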

        throw new SnapshotException("The snapshot " + newName
            + " already exists for directory " + path);
      }
      // remove the one with old name from snapshotsByNames
      Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
      final INodeDirectory ssRoot = snapshot.getRoot();
      ssRoot.setLocalName(newNameBytes);
      // indexOfNew is negative here (no snapshot with the new name exists);
      // convert the binary-search style result into its insertion point.
      indexOfNew = -indexOfNew - 1;
      if (indexOfNew <= indexOfOld) {
        snapshotsByNames.add(indexOfNew, snapshot);
      } else { // indexOfNew > indexOfOld
        // The removal above shifted later entries left by one, hence the -1.
        snapshotsByNames.add(indexOfNew - 1, snapshot);
        // ...

      final Snapshot snapshot = snapshotsByNames.get(i);
      int prior = Snapshot.findLatestSnapshot(this, snapshot.getId());
      try {
        // Clean this snapshot's diffs relative to the latest snapshot taken
        // before it, collecting the blocks and inodes that can be freed.
        Quota.Counts counts = cleanSubtree(snapshot.getId(), prior,
            collectedBlocks, removedINodes, true);
        INodeDirectory parent = getParent();
        if (parent != null) {
          // there will not be any WithName node corresponding to the deleted
          // snapshot, thus only update the quota usage in the current tree
          parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
              -counts.get(Quota.DISKSPACE), true);
        }
      } catch (QuotaExceededException e) {
        LOG.error("BUG: removeSnapshot increases namespace usage.", e);
      }
      // ...

                         int blocksPerFile, long startingBlockId,
                         FileNameGenerator nameGenerator) {

    // Fabricate directory and file inodes in memory and write the matching
    // mkdir/open/close records directly to the edit log.
    PermissionStatus p = new PermissionStatus("joeDoe", "people",
                                      new FsPermission((short)0777));
    INodeDirectory dirInode = new INodeDirectory(p, 0L);
    editLog.logMkDir(BASE_PATH, dirInode);
    long blockSize = 10;
    // One shared block array; its ids are renumbered for every generated file.
    BlockInfo[] blocks = new BlockInfo[blocksPerFile];
    for (int iB = 0; iB < blocksPerFile; ++iB) {
      blocks[iB] =
       new BlockInfo(new Block(0, blockSize, BLOCK_GENERATION_STAMP),
                               replication);
    }

    long currentBlockId = startingBlockId;
    long bidAtSync = startingBlockId;

    for (int iF = 0; iF < numFiles; iF++) {
      for (int iB = 0; iB < blocksPerFile; ++iB) {
         blocks[iB].setBlockId(currentBlockId++);
      }

      try {
        INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
                      null, replication, 0, blockSize, blocks, p, "", "", null);
        // Append path to filename with information about blockIDs
        String path = "_" + iF + "_B" + blocks[0].getBlockId() +
                      "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
        String filePath = nameGenerator.getNextFileName("");
        filePath = filePath + path;
        // Log the new sub directory in edits
        if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
          String currentDir = nameGenerator.getCurrentDir();
          dirInode = new INodeDirectory(p, 0L);
          editLog.logMkDir(currentDir, dirInode);
        }
        editLog.logOpenFile(filePath, inode);
        editLog.logCloseFile(filePath, inode);
        // ...


