
Examples of org.apache.hadoop.hdfs.server.namenode.INode$BlocksMapUpdateInfo
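
INode.BlocksMapUpdateInfo is the small collector that HDFS deletion and snapshot-deletion code passes around (usually as a parameter named collectedBlocks, as in several of the fragments below) to gather the blocks that must be removed from the blocks map once an INode subtree goes away. The following sketch illustrates that life cycle. It is written against the Hadoop 2.x API (no-arg constructor, addDeleteBlock, getToDeleteList, clear) and is illustrative only; it is not taken from the fragments on this page.

    package org.apache.hadoop.hdfs.server.namenode;

    import org.apache.hadoop.hdfs.protocol.Block;

    class BlocksMapUpdateInfoSketch {
      static void demo() {
        // Collector that delete/cleanSubtree code fills while tearing down a subtree.
        INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();

        // Deletion code records each block that is no longer referenced
        // instead of touching the blocks map directly.
        collectedBlocks.addDeleteBlock(new Block(1L));
        collectedBlocks.addDeleteBlock(new Block(2L));

        // The caller later removes the collected blocks from the block
        // manager and clears the collector.
        for (Block b : collectedBlocks.getToDeleteList()) {
          System.out.println("block to remove from the blocks map: " + b);
        }
        collectedBlocks.clear();
      }
    }

The same object shows up indirectly in the fragments below: TestSnapshotBlocksMap.assertBlockCollection checks that a file's blocks are still registered with the block manager, while a null result from blockmanager.getBlockCollection(b) confirms that blocks collected during a delete were really removed.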


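Fragment from a snapshot test: after taking snapshot s1 on dir and creating a file under sub, the test asserts that sub's inode is an INodeDirectoryWithSnapshot, and that calling allowSnapshot(sub) replaces it with an INodeDirectorySnapshottable.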
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    final Path file = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
   
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INode subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
   
    hdfs.allowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode instanceof INodeDirectorySnapshottable);
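
Fragment: the replication stored in FileStatus is derived from INodeFile#getFileReplication(), so the test checks it separately from the value returned by INodeFile#getBlockReplication().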


    // Note that the replication number in FileStatus was derived from
    // INodeFile#getFileReplication().
    short fileReplication = hdfs.getFileStatus(file1).getReplication();
    assertEquals(replication, fileReplication);
    // Check the correctness of getBlockReplication()
    INode inode = fsdir.getINode(file1.toString());
    assertTrue(inode instanceof INodeFile);
    assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
  }
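
Helper fragment: checkQuotaUsageComputation asserts that a quota-enabled directory reports the expected namespace and diskspace usage, passing a recursive tree dump as the assertion message to make failures easier to debug.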

    hdfs.delete(dir, true);
  }
 
  private void checkQuotaUsageComputation(final Path dirPath,
      final long expectedNs, final long expectedDs) throws IOException {
    INode node = fsdir.getINode(dirPath.toString());
    assertTrue(node.isDirectory() && node.isQuotaSet());
    INodeDirectoryWithQuota dirNode = (INodeDirectoryWithQuota) node;
    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
        dirNode.getNamespace());
    assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
        dirNode.getDiskspace());
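
Fragment from a snapshot-deletion test: it first verifies the snapshot copies left by earlier deletions (their classes and their per-snapshot replication factors), then deletes sub and checks that the quota usage of dir is updated, that the blocks of the freshly created newFile disappear from the block manager, and that the snapshot copies of sub, subsub and metaChangeFile1 still reflect the state captured by snapshots s0 and s1.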

    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(null);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    INodeFileWithSnapshot metaChangeFile2SCopy =
        (INodeFileWithSnapshot) children.get(0);
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertEquals(INodeFileWithSnapshot.class, metaChangeFile2SCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(null));
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(snapshot1));
    assertEquals(REPLICATION,
        metaChangeFile2SCopy.getFileReplication(snapshot0));
   
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(
        newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // during the deletion, we add diffs for subsub and metaChangeFile1, and
    // remove newFile
    checkQuotaUsageComputation(dir, 19L, BLOCKSIZE * REPLICATION * 4);
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // make sure the whole subtree of sub is stored correctly in snapshot
    Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
        sub.getName());
    INodeDirectoryWithSnapshot snapshotNode4Sub =
        (INodeDirectoryWithSnapshot) fsdir.getINode(snapshotSub.toString());
    assertEquals(INodeDirectoryWithSnapshot.class, snapshotNode4Sub.getClass());
    // the snapshot copy of sub has only one child subsub.
    // newFile should have been destroyed
    assertEquals(1, snapshotNode4Sub.getChildrenList(null).size());
    // but it should have two children, subsub and noChangeDir, when s1 was taken
    assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1).size());
   
    // check the snapshot copy of subsub, which is contained in the subtree of
    // sub's snapshot copy
    INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(null).get(0);
    assertEquals(INodeDirectoryWithSnapshot.class,
        snapshotNode4Subsub.getClass());
    assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
    // check the children of subsub
    INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
    children = snapshotSubsubDir.getChildrenList(null);
    assertEquals(2, children.size());
    assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
    assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
    // only one child before snapshot s0
    children = snapshotSubsubDir.getChildrenList(snapshot0);
    assertEquals(1, children.size());
    INode child = children.get(0);
    assertEquals(child.getLocalName(), metaChangeFile1.getName());
    // check snapshot copy of metaChangeFile1
    assertEquals(INodeFileWithSnapshot.class, child.getClass());
    INodeFileWithSnapshot metaChangeFile1SCopy = (INodeFileWithSnapshot) child;
    assertEquals(REPLICATION_1,
        metaChangeFile1SCopy.getFileReplication(null));
    assertEquals(REPLICATION_1,
        metaChangeFile1SCopy.getFileReplication(snapshot1));
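
Fragment from a randomized Diff test: create, delete and modify operations are applied to the current list while being recorded in diffs; the test then verifies that apply2Previous rebuilds current from previous and apply2Current rebuilds previous from current, first diff by diff and then for a single diff produced with combinePosterior, and finally that accessPrevious and accessCurrent agree with a direct Diff.search over the two lists.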

      // otherwise, randomly pick an operation.
      final int nextOperation = current.isEmpty()? 1: RANDOM.nextInt(3) + 1;
      switch(nextOperation) {
      case 1: // create
      {
        final INode i = newINode(n++, width);
        create(i, current, diffs.get(j));
        break;
      }
      case 2: // delete
      {
        final INode i = current.get(RANDOM.nextInt(current.size()));
        delete(i, current, diffs.get(j));
        break;
      }
      case 3: // modify
      {
        final INode i = current.get(RANDOM.nextInt(current.size()));
        modify(i, current, diffs.get(j));
        break;
      }
      }
    }

    {
      // check if current == previous + diffs
      List<INode> c = previous;
      for(int i = 0; i < diffs.size(); i++) {
        c = diffs.get(i).apply2Previous(c);
      }
      if (!hasIdenticalElements(current, c)) {
        System.out.println("previous = " + previous);
        System.out.println();
        System.out.println("current  = " + current);
        System.out.println("c        = " + c);
        throw new AssertionError("current and c are not identical.");
      }

      // check if previous == current - diffs
      List<INode> p = current;
      for(int i = diffs.size() - 1; i >= 0; i--) {
        p = diffs.get(i).apply2Current(p);
      }
      if (!hasIdenticalElements(previous, p)) {
        System.out.println("previous = " + previous);
        System.out.println("p        = " + p);
        System.out.println();
        System.out.println("current  = " + current);
        throw new AssertionError("previous and p are not identical.");
      }
    }

    // combine all diffs
    final Diff<byte[], INode> combined = diffs.get(0);
    for(int i = 1; i < diffs.size(); i++) {
      combined.combinePosterior(diffs.get(i), null);
    }

    {
      // check if current == previous + combined
      final List<INode> c = combined.apply2Previous(previous);
      if (!hasIdenticalElements(current, c)) {
        System.out.println("previous = " + previous);
        System.out.println();
        System.out.println("current  = " + current);
        System.out.println("c        = " + c);
        throw new AssertionError("current and c are not identical.");
      }

      // check if previous == current - combined
      final List<INode> p = combined.apply2Current(current);
      if (!hasIdenticalElements(previous, p)) {
        System.out.println("previous = " + previous);
        System.out.println("p        = " + p);
        System.out.println();
        System.out.println("current  = " + current);
        throw new AssertionError("previous and p are not identical.");
      }
    }

    {
      for(int m = 0; m < n; m++) {
        final INode inode = newINode(m, width);
        {// test accessPrevious
          final Container<INode> r = combined.accessPrevious(inode.getKey());
          final INode computed;
          if (r != null) {
            computed = r.getElement();
          } else {
            final int i = Diff.search(current, inode.getKey());
            computed = i < 0? null: current.get(i);
          }

          final int j = Diff.search(previous, inode.getKey());
          final INode expected = j < 0? null: previous.get(j);
          // must be the same object (equals is not enough)
          Assert.assertTrue(computed == expected);
        }

        {// test accessCurrent
          final Container<INode> r = combined.accessCurrent(inode.getKey());
          final INode computed;
          if (r != null) {
            computed = r.getElement();
          } else {
            final int i = Diff.search(previous, inode.getKey());
            computed = i < 0? null: previous.get(i);
          }

          final int j = Diff.search(current, inode.getKey());
          final INode expected = j < 0? null: current.get(j);
          // must be the same object (equals is not enough)
          Assert.assertTrue(computed == expected);
        }
      }
    }
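
Fragment from snapshot-diff deletion: the subtree is walked iteratively with a deque; WithName references whose last snapshot is not older than the deleted one are cleaned via cleanSubtree, file snapshot diffs are removed through deleteSnapshotDiff (which is handed collectedBlocks and removedINodes), directories that carry snapshot diffs get their own handling, and the quota freed along the way is accumulated in counts.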

      throws QuotaExceededException {
    Quota.Counts counts = Quota.Counts.newInstance();
    Deque<INode> queue = new ArrayDeque<INode>();
    queue.addLast(inode);
    while (!queue.isEmpty()) {
      INode topNode = queue.pollFirst();
      if (topNode instanceof INodeReference.WithName) {
        INodeReference.WithName wn = (INodeReference.WithName) topNode;
        if (wn.getLastSnapshotId() >= post.getId()) {
          wn.cleanSubtree(post, prior, collectedBlocks, removedINodes,
              countDiffChange);
        }
        // For a DstReference node, since the node is not in the created list
        // of prior, we should treat it as a regular file/dir
      } else if (topNode.isFile()
          && topNode.asFile() instanceof FileWithSnapshot) {
        FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile();
        counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior,
            topNode.asFile(), collectedBlocks, removedINodes, countDiffChange));
      } else if (topNode.isDirectory()) {
        INodeDirectory dir = topNode.asDirectory();
        ChildrenDiff priorChildrenDiff = null;
        if (dir instanceof INodeDirectoryWithSnapshot) {
          // delete files/dirs created after prior. Note that these
          // files/dirs, along with inode, were deleted right after post.
          INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
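
Fragment: replacing oldChild with newChild in a sorted children list. The entry is located with a binary search on the local name, its inode id is checked against oldChild, and the element returned by List#set must be the exact object being replaced.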

      final int i = search(list, oldChild.getLocalNameBytes());
      if (i < 0 || list.get(i).getId() != oldChild.getId()) {
        return false;
      }

      final INode removed = list.set(i, newChild);
      Preconditions.checkState(removed == oldChild);
      return true;
    }
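
Fragment from snapshot diff-report generation: the sorted CREATED and DELETED child lists of a directory diff are merged; a name appearing in both lists means the inode was deleted and then re-created under the same name, so one DELETE and one CREATE entry are emitted, while the fromEarlier flag swaps CREATE and DELETE when the report is computed in the reverse direction.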

      List<INode> created = getList(ListType.CREATED);
      List<INode> deleted = getList(ListType.DELETED);
      byte[][] fullPath = new byte[parentPath.length + 1][];
      System.arraycopy(parentPath, 0, fullPath, 0, parentPath.length);
      for (; c < created.size() && d < deleted.size(); ) {
        INode cnode = created.get(c);
        INode dnode = deleted.get(d);
        if (cnode.compareTo(dnode.getLocalNameBytes()) == 0) {
          fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
          // this must be the case where an inode was deleted first and then
          // re-created with the same name
          cList.add(new DiffReportEntry(DiffType.CREATE, fullPath));
          dList.add(new DiffReportEntry(DiffType.DELETE, fullPath));
          c++;
          d++;
        } else if (cnode.compareTo(dnode.getLocalNameBytes()) < 0) {
          fullPath[fullPath.length - 1] = cnode.getLocalNameBytes();
          cList.add(new DiffReportEntry(fromEarlier ? DiffType.CREATE
              : DiffType.DELETE, fullPath));
          c++;
        } else {
          fullPath[fullPath.length - 1] = dnode.getLocalNameBytes();
          dList.add(new DiffReportEntry(fromEarlier ? DiffType.DELETE
              : DiffType.CREATE, fullPath));
          d++;
        }
      }
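
Fragment: deserializing a Snapshot from the FSImage. The snapshot id is read first, the snapshot root is loaded as an inode with its local name, and the Snapshot object is built from the two.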

  }
 
  static Snapshot read(DataInput in, FSImageFormat.Loader loader)
      throws IOException {
    final int snapshotId = in.readInt();
    final INode root = loader.loadINodeWithLocalName(false, in, false);
    return new Snapshot(snapshotId, root.asDirectory(), null);
  }
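
Fragment from FSImage loading: an entry of a created list is resolved by first searching the DELETED lists of the parent's posterior directory diffs and then the parent's current children; if no matching INode is found, loading fails with an IOException.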

  private static INode loadCreated(byte[] createdNodeName,
      INodeDirectoryWithSnapshot parent) throws IOException {
    // the INode in the created list should be a reference to another INode
    // in posterior SnapshotDiffs or one of the current children
    for (DirectoryDiff postDiff : parent.getDiffs()) {
      final INode d = postDiff.getChildrenDiff().search(ListType.DELETED,
          createdNodeName);
      if (d != null) {
        return d;
      } // else go to the next SnapshotDiff
    }
    // use the current child
    INode currentChild = parent.getChild(createdNodeName, null);
    if (currentChild == null) {
      throw new IOException("Cannot find an INode associated with the INode "
          + DFSUtil.bytes2String(createdNodeName)
          + " in created list while loading FSImage.");
    }