Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.INodeFile
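INodeFile is the namenode's in-memory representation of a file. It tracks the file's blocks (getBlocks()), its replication factor (getReplication()), its parent directory, and whether the file is still under construction. The excerpts below, taken from the HDFS namenode sources and their tests, show how INodeFile is looked up, inspected, and mocked.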


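This test fragment, apparently from the HDFS snapshot tests, appends to a file across two snapshots and then checks which blocks the snapshot copy retains after the file is deleted: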
    // create a one-block file bar under foo
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

    // take snapshot s0, then reopen bar for append
    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
    hdfs.append(bar);

    // bar still has a single full block
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());

    // allocate a new, still-empty block for the appending client
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc()
        .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
            null, barNode.getId(), null);

    // take snapshot s1 while the appended block is still empty
    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

    // bar now has two blocks: the original full block and the new empty one
    barNode = fsdir.getINode4Write(bar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());

    // delete bar; the copy preserved in snapshot s1 keeps only the full
    // first block, since the second block was empty at snapshot time
    hdfs.delete(bar, true);
    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
        bar.getName());
    barNode = fsdir.getINode(sbar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  }


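This fragment, from a snapshot rename/delete test, verifies that deleting one file does not detach an unrelated file from its parent directory: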
    // the deleted inode is stored in the deleted list of foo, and will be destroyed
    hdfs.delete(foo2, true);
   
    // check that /dir3/bar still exists and is still attached to dir3
    assertTrue(hdfs.exists(bar3));
    INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
    assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
  }

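Inside BlockManager.markBlockAsCorrupt, the stored block is mapped back to its owning INodeFile; if no file owns the block anymore, the replica is simply scheduled for invalidation instead of being marked corrupt: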
    if (node == null) {
      throw new IOException("Cannot mark block " +
                            storedBlock.getBlockName() +
                            " as corrupt because datanode " + dn.getName() +
                            " does not exist. ");
    }

    INodeFile inode = storedBlock.getINode();
    if (inode == null) {
      NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
                                   "block " + storedBlock +
                                   " could not be marked as corrupt as it" +
                                   " does not belong to any file");
      addToInvalidates(storedBlock, node);
      return;
    }

    // Add replica to the data-node if it is not already there
    node.addBlock(storedBlock);

    // Add this replica to corruptReplicas Map
    corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
      // the block is over-replicated so invalidate the replicas immediately
      invalidateBlock(storedBlock, node);
    } else if (namesystem.isPopulatingReplQueues()) {
      // add the block to neededReplication
      updateNeededReplications(storedBlock, -1, 0);
    }
  }

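computeReplicationWorkForBlock consults the owning INodeFile twice: to skip abandoned or under-construction files, and to read the required replication factor. Note the locking pattern: state is read under the namesystem write lock, the lock is released for the expensive target-selection step, and the state is then rechecked: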
  @VisibleForTesting
  boolean computeReplicationWorkForBlock(Block block, int priority) {
    int requiredReplication, numEffectiveReplicas;
    List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
    DatanodeDescriptor srcNode;
    INodeFile fileINode = null;
    int additionalReplRequired;

    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        // block should belong to a file
        fileINode = blocksMap.getINode(block);
        // abandoned block or block reopened for append
        if(fileINode == null || fileINode.isUnderConstruction()) {
          neededReplications.remove(block, priority); // remove from neededReplications
          replIndex--;
          return false;
        }

        requiredReplication = fileINode.getReplication();

        // get a source data-node
        containingNodes = new ArrayList<DatanodeDescriptor>();
        liveReplicaNodes = new ArrayList<DatanodeDescriptor>();
        NumberReplicas numReplicas = new NumberReplicas();
        srcNode = chooseSourceDatanode(
            block, containingNodes, liveReplicaNodes, numReplicas);
        if (srcNode == null) // block cannot be replicated from any node
          return false;

        assert liveReplicaNodes.size() == numReplicas.liveReplicas();
        // do not schedule more if enough replicas are already pending
        numEffectiveReplicas = numReplicas.liveReplicas() +
                                pendingReplications.getNumReplicas(block);
     
        if (numEffectiveReplicas >= requiredReplication) {
          if ( (pendingReplications.getNumReplicas(block) > 0) ||
               (blockHasEnoughRacks(block)) ) {
            neededReplications.remove(block, priority); // remove from neededReplications
            replIndex--;
            NameNode.stateChangeLog.info("BLOCK* "
                + "Removing block " + block
                + " from neededReplications as it has enough replicas.");
            return false;
          }
        }

        if (numReplicas.liveReplicas() < requiredReplication) {
          additionalReplRequired = requiredReplication - numEffectiveReplicas;
        } else {
          additionalReplRequired = 1; // needed on a new rack
        }

      }
    } finally {
      namesystem.writeUnlock();
    }
   
    // Exclude all of the containing nodes from being targets.
    // This list includes decommissioning or corrupt nodes.
    HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
    for (DatanodeDescriptor dn : containingNodes) {
      excludedNodes.put(dn, dn);
    }

    // choose replication targets: NOT HOLDING THE GLOBAL LOCK
    // It is costly to extract the filename for which chooseTargets is called,
    // so for now we pass in the Inode itself.
    DatanodeDescriptor[] targets = blockplacement.chooseTarget(fileINode,
        additionalReplRequired, srcNode, liveReplicaNodes, excludedNodes,
        block.getNumBytes());
    if (targets.length == 0)
      return false;

    namesystem.writeLock();
    try {
      synchronized (neededReplications) {
        // Recheck since global lock was released
        // block should belong to a file
        fileINode = blocksMap.getINode(block);
        // abandoned block or block reopened for append
        if(fileINode == null || fileINode.isUnderConstruction()) {
          neededReplications.remove(block, priority); // remove from neededReplications
          replIndex--;
          return false;
        }
        requiredReplication = fileINode.getReplication();

        // do not schedule more if enough replicas are already pending
        NumberReplicas numReplicas = countNodes(block);
        numEffectiveReplicas = numReplicas.liveReplicas() +
            pendingReplications.getNumReplicas(block);

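In addStoredBlock, the owning INodeFile decides whether a committed block can be completed and whether the replication queues need to be updated: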
      // we could add this block to the invalidate set of this datanode;
      // otherwise it will happen in the next block report.
      return block;
    }
    assert storedBlock != null : "Block must be stored by now";
    INodeFile fileINode = storedBlock.getINode();
    assert fileINode != null : "Block must belong to a file";

    // add block to the datanode
    boolean added = node.addBlock(storedBlock);

    int curReplicaDelta;
    if (added) {
      curReplicaDelta = 1;
      if (logEveryBlock) {
        NameNode.stateChangeLog.info("BLOCK* addStoredBlock: "
            + "blockMap updated: " + node.getName() + " is added to " +
            storedBlock + " size " + storedBlock.getNumBytes());
      }
    } else {
      curReplicaDelta = 0;
      NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
          + "Redundant addStoredBlock request received for " + storedBlock
          + " on " + node.getName() + " size " + storedBlock.getNumBytes());
    }

    // Now check for completion of blocks and safe block count
    NumberReplicas num = countNodes(storedBlock);
    int numLiveReplicas = num.liveReplicas();
    int numCurrentReplica = numLiveReplicas
      + pendingReplications.getNumReplicas(storedBlock);

    if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
        numLiveReplicas >= minReplication)
      storedBlock = completeBlock(fileINode, storedBlock);

    // check whether safe replication is reached for the block;
    // only complete blocks are counted towards that.
    // This is a no-op if not in safe mode.
    if (storedBlock.isComplete())
      namesystem.incrementSafeBlockCount(numCurrentReplica);

    // if the file is under construction, we are done for now
    if (fileINode.isUnderConstruction()) {
      return storedBlock;
    }

    // do not try to handle over/under-replicated blocks during safe mode
    if (!namesystem.isPopulatingReplQueues()) {
      return storedBlock;
    }

    // handle underReplication/overReplication
    short fileReplication = fileINode.getReplication();
    if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
      neededReplications.remove(storedBlock, numCurrentReplica,
          num.decommissionedReplicas(), fileReplication);
    } else {
      updateNeededReplications(storedBlock, curReplicaDelta, 0);

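processMisReplicatedBlocks scans the whole blocksMap, using block.getINode() to invalidate orphaned blocks and to fetch the expected replication for the rest: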
    long nrInvalid = 0, nrOverReplicated = 0, nrUnderReplicated = 0,
         nrUnderConstruction = 0;
    neededReplications.clear();
    for (BlockInfo block : blocksMap.getBlocks()) {
      INodeFile fileINode = block.getINode();
      if (fileINode == null) {
        // block does not belong to any file
        nrInvalid++;
        addToInvalidates(block);
        continue;
      }
      if (!block.isComplete()) {
        // Incomplete blocks are never considered mis-replicated --
        // they'll be reached when they are completed or recovered.
        nrUnderConstruction++;
        continue;
      }
      // calculate current replication
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      // add to the under-replicated queue if needed
      if (isNeededReplication(block, expectedReplication, numCurrentReplica)) {
        if (neededReplications.add(block, numCurrentReplica,
            num.decommissionedReplicas(), expectedReplication)) {
          nrUnderReplicated++;
        }
      }

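When choosing excess replicas to remove, the BlockManager again resolves the owning inode before grouping the candidate datanodes by rack: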
                              DatanodeDescriptor addedNode,
                              DatanodeDescriptor delNodeHint,
                              BlockPlacementPolicy replicator) {
    assert namesystem.hasWriteLock();
    // first form a rack-to-datanodes map
    INodeFile inode = getINode(b);
    final Map<String, List<DatanodeDescriptor>> rackMap
        = new HashMap<String, List<DatanodeDescriptor>>();
    for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
        iter.hasNext(); ) {
      final DatanodeDescriptor node = iter.next();

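When a datanode is re-commissioned, each of its blocks is checked against the owning file's replication factor so that now-excess replicas can be pruned: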
  void processOverReplicatedBlocksOnReCommission(
      final DatanodeDescriptor srcNode) {
    final Iterator<? extends Block> it = srcNode.getBlockIterator();
    while(it.hasNext()) {
      final Block block = it.next();
      INodeFile fileINode = blocksMap.getINode(block);
      short expectedReplication = fileINode.getReplication();
      NumberReplicas num = countNodes(block);
      int numCurrentReplica = num.liveReplicas();
      if (numCurrentReplica > expectedReplication) {
        // over-replicated block
        processOverReplicatedBlock(block, expectedReplication, null, null);
      }
    }
  }


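A small helper shows the canonical lookup: map a block back to its owning file and read the replication factor, treating orphaned blocks as replication 0: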
  /* get replication factor of a block */
  private int getReplication(Block block) {
    INodeFile fileINode = blocksMap.getINode(block);
    if (fileINode == null) { // block does not belong to any file
      return 0;
    }
    assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
    return fileINode.getReplication();
  }

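In unit tests, INodeFile is usually mocked rather than built from a real namespace. This helper from a BlockManager test registers a block against a Mockito mock whose getReplication() returns 3: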
    }
    return nodes;
  }
 
  private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
    // mock the owning file with a replication factor of 3
    INodeFile iNode = Mockito.mock(INodeFile.class);
    Mockito.doReturn((short) 3).when(iNode).getReplication();
    BlockInfo blockInfo = blockOnNodes(blockId, nodes);

    // register the block and its owning inode in the blocksMap
    bm.blocksMap.addINode(blockInfo, iNode);
    return blockInfo;
  }

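The mocking pattern above generalizes to the other INodeFile methods that the production code paths consult. The following is a minimal sketch, not code from any of the excerpts: it stubs getReplication() and isUnderConstruction() (both called in the BlockManager code above) so that a BlockManager under test sees a finalized file with replication 3; the class and method names are hypothetical.

  import org.apache.hadoop.hdfs.server.namenode.INodeFile;
  import org.mockito.Mockito;

  public class INodeFileMockSketch {
    // Build a mock INodeFile that the replication code paths will treat
    // as a finalized file with an expected replication factor of 3.
    static INodeFile mockFinalizedFile() {
      INodeFile iNode = Mockito.mock(INodeFile.class);
      Mockito.doReturn((short) 3).when(iNode).getReplication();
      Mockito.doReturn(false).when(iNode).isUnderConstruction();
      return iNode;
    }
  }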