Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile
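INodeFile is the NameNode's in-memory representation of a file: it holds the file's array of BlockInfo objects and its replication factor, and the block manager can map any block back to its owning INodeFile. The snippets below, taken from the HDFS block-management code and its snapshot tests, exercise both directions of that mapping. As a minimal orientation sketch (assuming a test with a running MiniDFSCluster named cluster; the path is illustrative):

    // Resolve a path to its INodeFile through FSDirectory, as the tests below do.
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = (INodeFile) fsdir.getINode("/foo/bar");
    BlockInfo[] blocks = fileNode.getBlocks();      // blocks owned by the file
    short replication = fileNode.getReplication();  // expected replication factor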


        modDirStr + "file15");
    FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
    INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
        file14_s2.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks_14 = file14Node.getBlocks();
    TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
        blockmanager);
   
    // delete s2; as part of the deletion, the diff recorded in s2 is merged into s1
    hdfs.deleteSnapshot(snapshotRoot, "s2");
    checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 12 + delta,
        14 * BLOCKSIZE);
   
    // check the correctness of s1
    FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
    FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
    FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
    FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
    assertEquals(statusBeforeDeletion10.toString(),
        statusAfterDeletion10.toString());
    assertEquals(statusBeforeDeletion11.toString(),
        statusAfterDeletion11.toString());
    assertEquals(statusBeforeDeletion12.toString(),
        statusAfterDeletion12.toString());
    assertEquals(statusBeforeDeletion13.toString(),
        statusAfterDeletion13.toString());
    TestSnapshotBlocksMap.assertBlockCollection(file10_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file11_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file12_s1.toString(), 1, fsdir,
        blockmanager);
    TestSnapshotBlocksMap.assertBlockCollection(file13_s1.toString(), 1, fsdir,
        blockmanager);
   
    // make sure file14 and file15 are not included in s1
    Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
        modDirStr + "file14");
    Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
        modDirStr + "file15");
    assertFalse(hdfs.exists(file14_s1));
    assertFalse(hdfs.exists(file15_s1));
    for (BlockInfo b : blocks_14) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
    assertEquals(REPLICATION_1, nodeFile13.getBlockReplication());
    TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
        blockmanager);
   
    INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
    assertEquals(REPLICATION_1, nodeFile12.getBlockReplication());
  }
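The test above verifies that deleting snapshot s2 merges its diff into s1 without disturbing the files captured in s1. For orientation, a minimal sketch of the user-level snapshot operations involved (assuming a DistributedFileSystem handle named hdfs; paths are illustrative):

    // Snapshot lifecycle via the public API. Snapshot contents are reachable
    // under <dir>/.snapshot/<name>/...
    Path dir = new Path("/snapshottable");
    hdfs.mkdirs(dir);
    hdfs.allowSnapshot(dir);          // make dir snapshottable
    hdfs.createSnapshot(dir, "s1");
    hdfs.createSnapshot(dir, "s2");
    Path fileInS1 = new Path(dir, ".snapshot/s1/file15");
    hdfs.deleteSnapshot(dir, "s2");   // s2's diff is merged into s1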


                            storedBlock.getBlockName() +
                            " as corrupt because datanode " + dn.getName() +
                            " does not exist. ");
    }

    INodeFile inode = storedBlock.getINode();
    if (inode == null) {
      NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
                                   "block " + storedBlock +
                                   " could not be marked as corrupt as it" +
                                   " does not belong to any file");
      addToInvalidates(storedBlock, node);
      return;
    }

    // Add replica to the data-node if it is not already there
    node.addBlock(storedBlock);

    // Add this replica to corruptReplicas Map
    corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
      // the block is over-replicated so invalidate the replicas immediately
      invalidateBlock(storedBlock, node);
    } else if (namesystem.isPopulatingReplQueues()) {
      // add the block to neededReplication
      updateNeededReplications(storedBlock, -1, 0);
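markBlockAsCorrupt above makes a three-way decision: a block with no owning INodeFile is invalidated outright; a block that still meets its replication target has the corrupt replica invalidated immediately; otherwise the corrupt replica is recorded and kept until re-replication produces enough good copies. A condensed restatement of that decision (the method name is hypothetical, for illustration only):

    // Hypothetical condensed form of the decision in markBlockAsCorrupt.
    void handleCorruptReplica(BlockInfo stored, DatanodeDescriptor node,
        String reason) {
      INodeFile inode = stored.getINode();
      if (inode == null) {              // orphaned block: just reclaim it
        addToInvalidates(stored, node);
        return;
      }
      node.addBlock(stored);            // make sure the replica is registered
      corruptReplicas.addToCorruptReplicasMap(stored, node, reason);
      if (countNodes(stored).liveReplicas() >= inode.getReplication()) {
        invalidateBlock(stored, node);  // enough good copies: drop the bad one
      } else if (namesystem.isPopulatingReplQueues()) {
        updateNeededReplications(stored, -1, 0); // re-replicate before deleting
      }
    }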

  @VisibleForTesting
  boolean computeReplicationWorkForBlock(Block block, int priority) {
    int requiredReplication, numEffectiveReplicas;
    List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
    DatanodeDescriptor srcNode;
    INodeFile fileINode = null;
    int additionalReplRequired;

    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        // block should belong to a file
        fileINode = blocksMap.getINode(block);
        // abandoned block or block reopened for append
        if(fileINode == null || fileINode.isUnderConstruction()) {
          neededReplications.remove(block, priority); // remove from neededReplications
          replIndex--;
          return false;
        }

        requiredReplication = fileINode.getReplication();

        // get a source data-node
        containingNodes = new ArrayList<DatanodeDescriptor>();
        liveReplicaNodes = new ArrayList<DatanodeDescriptor>();
        NumberReplicas numReplicas = new NumberReplicas();
        srcNode = chooseSourceDatanode(
            block, containingNodes, liveReplicaNodes, numReplicas);
        if(srcNode == null) // block cannot be replicated from any node
          return false;

        assert liveReplicaNodes.size() == numReplicas.liveReplicas();
        // do not schedule more if enough replicas are already pending
        numEffectiveReplicas = numReplicas.liveReplicas() +
                                pendingReplications.getNumReplicas(block);
     
        if (numEffectiveReplicas >= requiredReplication) {
          if ( (pendingReplications.getNumReplicas(block) > 0) ||
               (blockHasEnoughRacks(block)) ) {
            neededReplications.remove(block, priority); // remove from neededReplications
            replIndex--;
            NameNode.stateChangeLog.info("BLOCK* "
                + "Removing block " + block
                + " from neededReplications as it has enough replicas.");
            return false;
          }
        }

        if (numReplicas.liveReplicas() < requiredReplication) {
          additionalReplRequired = requiredReplication - numEffectiveReplicas;
        } else {
          additionalReplRequired = 1; //Needed on a new rack
        }

      }
    } finally {
      namesystem.writeUnlock();
    }
   
    // Exclude all of the containing nodes from being targets.
    // This list includes decommissioning or corrupt nodes.
    HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
    for (DatanodeDescriptor dn : containingNodes) {
      excludedNodes.put(dn, dn);
    }

    // choose replication targets: NOT HOLDING THE GLOBAL LOCK
    // It is costly to extract the filename for which chooseTarget is called,
    // so for now we pass in the INode itself.
    DatanodeDescriptor targets[] =
                       blockplacement.chooseTarget(fileINode, additionalReplRequired,
                       srcNode, liveReplicaNodes, excludedNodes, block.getNumBytes());
    if(targets.length == 0)
      return false;

    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        // Recheck since global lock was released
        // block should belong to a file
        fileINode = blocksMap.getINode(block);
        // abandoned block or block reopened for append
        if(fileINode == null || fileINode.isUnderConstruction()) {
          neededReplications.remove(block, priority); // remove from neededReplications
          replIndex--;
          return false;
        }
        requiredReplication = fileINode.getReplication();

        // do not schedule more if enough replicas are already pending
        NumberReplicas numReplicas = countNodes(block);
        numEffectiveReplicas = numReplicas.liveReplicas() +
            pendingReplications.getNumReplicas(block);
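The method above is built around a deliberate locking pattern: replica bookkeeping runs under the namesystem write lock, the expensive chooseTarget call runs with the lock released, and the block's state is re-validated once the lock is re-acquired, since it may have changed in the meantime. A self-contained sketch of that compute/release/recheck shape (all names are illustrative):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class SchedulerSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      private int required = 3;   // stands in for requiredReplication
      private int effective = 1;  // stands in for numEffectiveReplicas

      boolean scheduleOne() {
        int needed;
        lock.writeLock().lock();
        try {                                 // cheap bookkeeping under the lock
          needed = required - effective;
          if (needed <= 0) {
            return false;
          }
        } finally {
          lock.writeLock().unlock();
        }
        String target = chooseTarget(needed); // expensive work: lock NOT held
        lock.writeLock().lock();
        try {                                 // recheck: state may have changed
          if (target == null || required - effective <= 0) {
            return false;
          }
          effective++;                        // commit the scheduled work
          return true;
        } finally {
          lock.writeLock().unlock();
        }
      }

      private String chooseTarget(int n) { return "datanode-" + n; }
    }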

      // we could add this block to the invalidates set of this datanode;
      // otherwise it will happen with the next block report.
      return block;
    }
    assert storedBlock != null : "Block must be stored by now";
    INodeFile fileINode = storedBlock.getINode();
    assert fileINode != null : "Block must belong to a file";

    // add block to the datanode
    boolean added = node.addBlock(storedBlock);

    int curReplicaDelta;
    if (added) {
      curReplicaDelta = 1;
      if (logEveryBlock) {
        NameNode.stateChangeLog.info("BLOCK* addStoredBlock: "
            + "blockMap updated: " + node.getName() + " is added to " +
            storedBlock + " size " + storedBlock.getNumBytes());
      }
    } else {
      curReplicaDelta = 0;
      NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
          + "Redundant addStoredBlock request received for " + storedBlock
          + " on " + node.getName() + " size " + storedBlock.getNumBytes());
    }

    // Now check for completion of blocks and safe block count
    NumberReplicas num = countNodes(storedBlock);
    int numLiveReplicas = num.liveReplicas();
    int numCurrentReplica = numLiveReplicas
      + pendingReplications.getNumReplicas(storedBlock);

    if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
        numLiveReplicas >= minReplication)
      storedBlock = completeBlock(fileINode, storedBlock);

    // check whether safe replication is reached for the block
    // only complete blocks are counted towards that
    // Is no-op if not in safe mode.
    if(storedBlock.isComplete())
      namesystem.incrementSafeBlockCount(numCurrentReplica);

    // if file is under construction, then done for now
    if (fileINode.isUnderConstruction()) {
      return storedBlock;
    }

    // do not try to handle over/under-replicated blocks during safe mode
    if (!namesystem.isPopulatingReplQueues()) {
      return storedBlock;
    }

    // handle underReplication/overReplication
    short fileReplication = fileINode.getReplication();
    if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
      neededReplications.remove(storedBlock, numCurrentReplica,
          num.decommissionedReplicas(), fileReplication);
    } else {
      updateNeededReplications(storedBlock, curReplicaDelta, 0);
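A detail worth noting in addStoredBlock: scheduling decisions use the effective replica count, i.e. live replicas plus replicas already pending transfer, so in-flight replication work is never scheduled twice. For example, a file with replication factor 3, two live replicas, and one pending transfer has an effective count of 3 and triggers no further replication.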

    long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0,
         nrUnderConstruction = 0;
    neededReplications.clear();
    for (BlockInfo block : blocksMap.getBlocks()) {
      INodeFile fileINode = block.getINode();
      if (fileINode == null) {
        // block does not belong to any file
        nrInvalid++;
        addToInvalidates(block);
        continue;
      }
      if (!block.isComplete()) {
        // Incomplete blocks are never considered mis-replicated --
        // they'll be reached when they are completed or recovered.
        nrUnderConstruction++;
        continue;
      }
      // calculate current replication
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      // add to the under-replicated queue if needed
      if (isNeededReplication(block, expectedReplication, numCurrentReplica)) {
        if (neededReplications.add(block, numCurrentReplica, num
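This scan re-derives the state of every block, for example after a restart: blocks with no owning file are invalidated, incomplete blocks are skipped until they complete or are recovered, and the remainder are queued as under-replicated when they fall short of their file's replication factor. The next two fragments handle the opposite case: grouping a block's replicas by rack before choosing an excess replica to remove, and re-checking every block on a datanode when it is re-commissioned.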

                              DatanodeDescriptor addedNode,
                              DatanodeDescriptor delNodeHint,
                              BlockPlacementPolicy replicator) {
    assert namesystem.hasWriteLock();
    // first form a rack-to-datanodes map
    INodeFile inode = getINode(b);
    final Map<String, List<DatanodeDescriptor>> rackMap
        = new HashMap<String, List<DatanodeDescriptor>>();
    for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
        iter.hasNext(); ) {
      final DatanodeDescriptor node = iter.next();

  void processOverReplicatedBlocksOnReCommission(
      final DatanodeDescriptor srcNode) {
    final Iterator<? extends Block> it = srcNode.getBlockIterator();
    while(it.hasNext()) {
      final Block block = it.next();
      INodeFile fileINode = blocksMap.getINode(block);
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      if (numCurrentReplica > expectedReplication) {
        // over-replicated block
        processOverReplicatedBlock(block, expectedReplication, null, null);

    }
  }

  /* get replication factor of a block */
  private int getReplication(Block block) {
    INodeFile fileINode = blocksMap.getINode(block);
    if (fileINode == null) { // block does not belong to any file
      return 0;
    }
    assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
    return fileINode.getReplication();
  }
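Returning 0 for a block with no owning file gives callers a uniform convention: an expected replication of 0 marks the block as garbage. A hypothetical caller (illustrative only):

    // Hypothetical caller: replication 0 means the block belongs to no file.
    int expected = getReplication(block);
    if (expected == 0) {
      addToInvalidates(block);  // reclaim the orphaned block
    }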

    // after creating snapshot s0, create a directory tempdir under dir and then
    // delete tempdir immediately
    Path tempDir = new Path(dir, "tempdir");
    Path tempFile = new Path(tempDir, "tempfile");
    DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
        tempFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = temp.getBlocks();
    hdfs.delete(tempDir, true);
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 3);
    // check blocks of tempFile
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // make a change: create a new file under subsub
    Path newFileAfterS0 = new Path(subsub, "newFile");
    DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    // further change: change the replication factor of metaChangeFile
    hdfs.setReplication(metaChangeFile1, REPLICATION_1);
    hdfs.setReplication(metaChangeFile2, REPLICATION_1);
   
    // create snapshot s1
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    // check dir's quota usage
    checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
   
    // get two snapshots for later use
    Snapshot snapshot0 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
        .toString())).getSnapshot(DFSUtil.string2Bytes("s0"));
    Snapshot snapshot1 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
        .toString())).getSnapshot(DFSUtil.string2Bytes("s1"));
   
    // Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
    // metaChangeFile2. Note that when we directly delete a directory, the
    // directory will be converted to an INodeDirectoryWithSnapshot. To make
    // sure the deletion goes through an INodeDirectory, we delete the parent
    // of noChangeDir
    hdfs.delete(noChangeDirParent, true);
    // during the deletion, a diff is added for metaChangeFile2 as its snapshot
    // copy for s1; diffs are also added for both sub and noChangeDirParent
    checkQuotaUsageComputation(dir, 17L, BLOCKSIZE * REPLICATION * 4);
   
    // check the snapshot copy of noChangeDir
    Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1",
        sub.getName() + "/" + noChangeDirParent.getName() + "/"
            + noChangeDir.getName());
    INodeDirectory snapshotNode =
        (INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
    // should still be an INodeDirectory
    assertEquals(INodeDirectory.class, snapshotNode.getClass());
    ReadOnlyList<INode> children = snapshotNode.getChildrenList(null);
    // check 2 children: noChangeFile and metaChangeFile2
    assertEquals(2, children.size());
    INode noChangeFileSCopy = children.get(1);
    assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
    assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    INodeFileWithSnapshot metaChangeFile2SCopy =
        (INodeFileWithSnapshot) children.get(0);
    assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
    assertEquals(INodeFileWithSnapshot.class, metaChangeFile2SCopy.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
        metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
   
    // check the replication factor of metaChangeFile2SCopy
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(null));
    assertEquals(REPLICATION_1,
        metaChangeFile2SCopy.getFileReplication(snapshot1));
    assertEquals(REPLICATION,
        metaChangeFile2SCopy.getFileReplication(snapshot0));
   
    // Case 4: delete directory sub
    // before deleting sub, we first create a new file under sub
    Path newFile = new Path(sub, "newFile");
    DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
    final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(
        newFile.toString(), 1, fsdir, blockmanager);
    blocks = newFileNode.getBlocks();
    checkQuotaUsageComputation(dir, 18L, BLOCKSIZE * REPLICATION * 5);
    hdfs.delete(sub, true);
    // during the deletion, diffs are added for subsub and metaChangeFile1, and
    // newFile is removed
    checkQuotaUsageComputation(dir, 19L, BLOCKSIZE * REPLICATION * 4);
 
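The getFileReplication(snapshot) assertions above check that a snapshot freezes a file's replication factor: snapshot0 still reports the original REPLICATION even after setReplication lowered the live value. The same behavior can be observed through the public API (a sketch reusing the test's helpers; the file is illustrative):

    // Sketch: a snapshot keeps the replication factor in effect when it was
    // taken, while the live file reflects the latest setReplication call.
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    hdfs.setReplication(file, REPLICATION_1);
    Path fileInS0 = SnapshotTestHelper.getSnapshotPath(dir, "s0",
        file.getName());
    assertEquals(REPLICATION, hdfs.getFileStatus(fileInS0).getReplication());
    assertEquals(REPLICATION_1, hdfs.getFileStatus(file).getReplication());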

    Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
    DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
    DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
   
    final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
        .assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
    BlockInfo[] blocks = toDeleteFileNode.getBlocks();
   
    // create snapshot s0 on dir
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    checkQuotaUsageComputation(dir, 8, 3 * BLOCKSIZE * REPLICATION);
   
    // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
    hdfs.delete(toDeleteFile, true);
    // the deletion adds diffs for toDeleteFile and metaChangeDir
    checkQuotaUsageComputation(dir, 10, 3 * BLOCKSIZE * REPLICATION);
    // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
    // /TestSnapshot/sub/noChangeDir/metaChangeFile
    hdfs.setReplication(metaChangeFile, REPLICATION_1);
    hdfs.setOwner(metaChangeDir, "unknown", "unknown");
    checkQuotaUsageComputation(dir, 11, 3 * BLOCKSIZE * REPLICATION);
   
    // create snapshot s1 on dir
    hdfs.createSnapshot(dir, "s1");
    checkQuotaUsageComputation(dir, 12, 3 * BLOCKSIZE * REPLICATION);
   
    // delete snapshot s0
    hdfs.deleteSnapshot(dir, "s0");
    // namespace: remove toDeleteFile and its diff, metaChangeFile's diff,
    // metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
    // metaChangeFile's replication factor decreases
    checkQuotaUsageComputation(dir, 7, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
    for (BlockInfo b : blocks) {
      assertNull(blockmanager.getBlockCollection(b));
    }
   
    // check 1: there is no snapshot s0
    final INodeDirectorySnapshottable dirNode =
        (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
    Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
    assertNull(snapshot0);
    DirectoryDiffList diffList = dirNode.getDiffs();
    assertEquals(1, diffList.asList().size());
    assertEquals("s1", diffList.getLast().snapshot.getRoot().getLocalName());
    diffList = ((INodeDirectoryWithSnapshot) fsdir.getINode(
        metaChangeDir.toString())).getDiffs();
    assertEquals(0, diffList.asList().size());
   
    // check 2: noChangeDir and noChangeFile are still there
    final INodeDirectory noChangeDirNode =
        (INodeDirectory) fsdir.getINode(noChangeDir.toString());
    assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
    final INodeFile noChangeFileNode =
        (INodeFile) fsdir.getINode(noChangeFile.toString());
    assertEquals(INodeFile.class, noChangeFileNode.getClass());
    TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1,
        fsdir, blockmanager);
   
    // check 3: current metadata of metaChangeFile and metaChangeDir
    FileStatus status = hdfs.getFileStatus(metaChangeDir);
