Class org.apache.hadoop.dfs.BlocksMap

Examples of org.apache.hadoop.dfs.BlocksMap.BlockInfo
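
Each excerpt below shows how BlockInfo objects flow between the BlocksMap and the inodes that own them. The first one (apparently from FSDirectory, where a path's inode is replaced) re-registers every block of the new inode: BlocksMap.addINode returns the canonical BlockInfo for each block, and the inode stores that same object, so the map and the inode share a single reference per block.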


                              "failed to remove " + path);
      }
      rootDir.addNode(path, newnode);
      int index = 0;
      for (Block b : newnode.getBlocks()) {
        BlockInfo info = namesystem.blocksMap.addINode(b, newnode);
        newnode.setBlock(index, info); // inode refers to the block in BlocksMap
        index++;
      }
    }
  }
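
FSNamesystem.commitBlockSynchronization updates the last block of a file under construction after lease recovery. The stored BlockInfo is looked up and sanity-checked; the old entry is then removed from the map unconditionally, because the block's generation stamp is about to change, and (unless the block is being deleted) re-added with its new length and stamp.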


      ) throws IOException {
    LOG.info("commitBlockSynchronization(lastblock=" + lastblock
          + ", newgenerationstamp=" + newgenerationstamp
          + ", newlength=" + newlength
          + ", newtargets=" + Arrays.asList(newtargets) + ")");
    final BlockInfo oldblockinfo = blocksMap.getStoredBlock(lastblock);
    if (oldblockinfo == null) {
      throw new IOException("Block (=" + lastblock + ") not found");
    }
    INodeFile iFile = oldblockinfo.getINode();
    if (!iFile.isUnderConstruction()) {
      throw new IOException("Unexpected block (=" + lastblock
          + ") since the file (=" + iFile.getLocalName()
          + ") is not under construction");
    }
    INodeFileUnderConstruction pendingFile = (INodeFileUnderConstruction)iFile;

    // Remove the old block from the blocks map. This always has to be done
    // because the generation stamp of this block is changing.
    blocksMap.removeBlock(oldblockinfo);

    if (deleteblock) {
      pendingFile.removeBlock(lastblock);
    }
    else {
      // update last block, construct newblockinfo and add it to the blocks map
      lastblock.set(lastblock.blkid, newlength, newgenerationstamp);
      final BlockInfo newblockinfo = blocksMap.addINode(lastblock, pendingFile);
   
      //update block info
      DatanodeDescriptor[] descriptors = null;
      if (newtargets.length > 0) {
        descriptors = new DatanodeDescriptor[newtargets.length];
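
The unconditional removeBlock above follows a general rule for hash-keyed maps: never mutate a field that feeds an entry's hash code while the entry is a live key, or the map loses track of it. A minimal, self-contained sketch of the remove-mutate-reinsert pattern (ToyBlock and RekeyDemo are hypothetical stand-ins for illustration, not Hadoop classes):

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

// Toy stand-in for Block: equals/hashCode include the generation stamp,
// so advancing the stamp while the object is a live map key would strand
// its entry in the wrong hash bucket.
final class ToyBlock {
  long id;
  long genStamp;
  ToyBlock(long id, long genStamp) { this.id = id; this.genStamp = genStamp; }
  @Override public boolean equals(Object o) {
    if (!(o instanceof ToyBlock)) return false;
    ToyBlock b = (ToyBlock) o;
    return id == b.id && genStamp == b.genStamp;
  }
  @Override public int hashCode() { return Objects.hash(id, genStamp); }
}

public class RekeyDemo {
  public static void main(String[] args) {
    Map<ToyBlock, String> map = new HashMap<>();
    ToyBlock b = new ToyBlock(42L, 1L);
    map.put(b, "inode-for-42");

    // Correct re-keying: remove under the old identity, mutate, re-insert.
    String value = map.remove(b);
    b.genStamp = 2L;                 // the generation stamp advances
    map.put(b, value);

    System.out.println(map.get(new ToyBlock(42L, 2L))); // inode-for-42
  }
}

The next excerpt, FSNamesystem.addStoredBlock, records that a datanode holds a replica of a block. It resolves the reported Block to the stored BlockInfo, reconciles size disagreements between replicas (the smaller copies are invalidated), updates safe-block and replica counts, and skips replication checks for files still under construction.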

   * @return the block that is stored in blockMap.
   */
  synchronized Block addStoredBlock(Block block,
                                    DatanodeDescriptor node,
                                    DatanodeDescriptor delNodeHint) {
    BlockInfo storedBlock = blocksMap.getStoredBlock(block);
    if(storedBlock == null || storedBlock.getINode() == null) {
      // If this block does not belong to any file, then we are done.
      NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
                                   + "addStoredBlock request received for "
                                   + block + " on " + node.getName()
                                   + " size " + block.getNumBytes()
                                   + " But it does not belong to any file.");
      // We could add this block to the invalidate set of this datanode;
      // otherwise it will be handled in the next block report.
      return block;     
    }
    
    // add block to the data-node
    boolean added = node.addBlock(storedBlock);
   
    assert storedBlock != null : "Block must be stored by now";

    if (block != storedBlock) {
      if (block.getNumBytes() > 0) {
        long cursize = storedBlock.getNumBytes();
        if (cursize == 0) {
          storedBlock.setNumBytes(block.getNumBytes());
        } else if (cursize != block.getNumBytes()) {
          LOG.warn("Inconsistent size for block " + block +
                   " reported from " + node.getName() +
                   " current size is " + cursize +
                   " reported size is " + block.getNumBytes());
          try {
            if (cursize > block.getNumBytes()) {
              // new replica is smaller in size than existing block.
              // Delete new replica.
              LOG.warn("Deleting block " + block + " from " + node.getName());
              invalidateBlock(block, node);
            } else {
              // new replica is larger in size than existing block.
              // Delete pre-existing replicas.
              int numNodes = blocksMap.numNodes(block);
              int count = 0;
              DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
              Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
              while (it != null && it.hasNext()) {
                DatanodeDescriptor dd = it.next();
                if (!dd.equals(node)) {
                  nodes[count++] = dd;
                }
              }
              for (int j = 0; j < count; j++) {
                LOG.warn("Deleting block " + block + " from " +
                         nodes[j].getName());
                invalidateBlock(block, nodes[j]);
              }
              //
              // change the size of block in blocksMap
              //
              storedBlock = blocksMap.getStoredBlock(block); //extra look up!
              if (storedBlock == null) {
                LOG.warn("Block " + block +
                   " reported from " + node.getName() +
                   " does not exist in blockMap. Surprise! Surprise!");
              } else {
                storedBlock.setNumBytes(block.getNumBytes());
              }
            }
          } catch (IOException e) {
            LOG.warn("Error in deleting bad block " + block + e);
          }
        }
      }
      block = storedBlock;
    }
    assert storedBlock == block : "Block must be stored by now";
       
    int curReplicaDelta = 0;
       
    if (added) {
      curReplicaDelta = 1;
      //
      // At startup so many new blocks come in that logging each one
      // would take up lots of space in the log file.
      // So, we log only when the namenode is out of safemode.
      //
      if (!isInSafeMode()) {
        NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
                                     + "blockMap updated: " + node.getName()
                                     + " is added to " + block
                                     + " size " + block.getNumBytes());
      }
    } else {
      NameNode.stateChangeLog.warn("BLOCK* NameSystem.addStoredBlock: "
                                   + "Redundant addStoredBlock request received for "
                                   + block + " on " + node.getName()
                                   + " size " + block.getNumBytes());
    }

    // filter out containingNodes that are marked for decommission.
    NumberReplicas num = countNodes(storedBlock);
    int numLiveReplicas = num.liveReplicas();
    int numCurrentReplica = numLiveReplicas
      + pendingReplications.getNumReplicas(block);

    // check whether safe replication is reached for the block
    incrementSafeBlockCount(numCurrentReplica);
    //
    // if file is being actively written to, then do not check
    // replication-factor here. It will be checked when the file is closed.
    //
    INodeFile fileINode = storedBlock.getINode();
    if (fileINode.isUnderConstruction()) {
      return block;
    }

    // do not handle mis-replicated blocks during startup
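
nextGenerationStampForBlock guards block recovery: a new generation stamp is issued only if the block still exists in the BlocksMap and its file is under construction (i.e. still has a lease); otherwise the block is considered committed.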

   * Verifies that the block is associated with a file that has a lease.
   * Increments, logs and then returns the stamp
   */
  synchronized long nextGenerationStampForBlock(Block block) throws IOException {
    String msg = "Block " + block + " is already committed.";
    BlockInfo storedBlock = blocksMap.getStoredBlock(block);
    if (storedBlock == null) {
      LOG.info(msg);
      throw new IOException(msg);
    }
    INode fileINode = storedBlock.getINode();
    if (!fileINode.isUnderConstruction()) {
      LOG.info(msg);
      throw new IOException(msg);
    }
    return nextGenerationStamp();
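
The block-report diff below (reportDiff) classifies every block a datanode reports: blocks unknown to the BlocksMap are queued in toInvalidate, and known blocks not yet recorded against this datanode are queued in toAdd. The delimiter inserted at the head of the node's block list separates reported blocks from unreported ones; whatever ends up behind the delimiter belongs in toRemove, as the sketch after the excerpt shows.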

                  Collection<Block> toAdd,
                  Collection<Block> toRemove,
                  Collection<Block> toInvalidate) {
    // Place a delimiter in the list to separate blocks that have been
    // reported from those that have not.
    BlockInfo delimiter = new BlockInfo(new Block(), 1);
    boolean added = this.addBlock(delimiter);
    assert added : "Delimiting block cannot be present in the node";
    if(newReport == null)
      newReport = new BlockListAsLongs( new long[0]);
    // scan the report and collect newly reported blocks
    // Note: we take special care to limit the temporary Block objects
    // allocated during this block report, which is why the block list
    // is stored as longs.
    Block iblk = new Block(); // a single Block object, reused for each index i
    for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) {
      iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i),
               newReport.getBlockGenStamp(i));
      BlockInfo storedBlock = blocksMap.getStoredBlock(iblk);
      if(storedBlock == null) {
        // If block is not in blocksMap it does not belong to any file
        toInvalidate.add(new Block(iblk));
        continue;
      }
      if(storedBlock.findDatanode(this) < 0) {// Known block, but not on the DN
        // if the size differs from what is in the blockmap, then return
        // the new block. addStoredBlock will then pick up the right size of this
        // block and will update the block object in the BlocksMap
        if (storedBlock.getNumBytes() != iblk.getNumBytes()) {
          toAdd.add(new Block(iblk));
        } else {
          toAdd.add(storedBlock);
        }
        continue;
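
The delimiter trick can be modelled with a plain list (a toy sketch; DelimiterDiffDemo is hypothetical and uses a LinkedList instead of the real per-datanode BlockInfo chain):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

public class DelimiterDiffDemo {
  public static void main(String[] args) {
    // Blocks currently recorded for a datanode.
    LinkedList<String> recorded = new LinkedList<>(Arrays.asList("b1", "b2", "b3"));
    // Blocks the datanode just reported.
    List<String> reported = Arrays.asList("b2", "b4");

    String delimiter = "<delimiter>";
    recorded.addFirst(delimiter);

    List<String> toAdd = new LinkedList<>();
    for (String blk : reported) {
      if (recorded.remove(blk)) {
        recorded.addFirst(blk); // known and reported: move in front of the delimiter
      } else {
        toAdd.add(blk);         // reported but unknown here: schedule an add
      }
    }

    // Everything still behind the delimiter was not reported by the node.
    int cut = recorded.indexOf(delimiter);
    List<String> toRemove = recorded.subList(cut + 1, recorded.size());
    System.out.println("toAdd=" + toAdd + " toRemove=" + toRemove);
    // prints: toAdd=[b4] toRemove=[b1, b3]
  }
}

The last excerpt shows how those per-datanode lists are threaded through BlockInfo itself: each entry carries, for every replica location, a link to the next BlockInfo stored on the same datanode, and the iterator simply follows that chain.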

    public boolean hasNext() {
      return current != null;
    }

    public BlockInfo next() {
      BlockInfo res = current;
      current = current.getNext(current.findDatanode(node));
      return res;
    }
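
A typical consumer walks every block on a node through this iterator (a sketch; the getBlockIterator accessor on DatanodeDescriptor is assumed here and may differ by version):

Iterator<BlockInfo> it = node.getBlockIterator();
while (it.hasNext()) {
  BlockInfo b = it.next();
  // e.g. re-replicate b when the node is being decommissioned
}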
