Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.Block
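
The Block class identifies an HDFS block by three fields: block ID, length (numBytes), and generation stamp. The snippets below, drawn from the HDFS sources and tests, show Block used as a map key, rebuilt from stored replica metadata, queued for replication, invalidation, and recovery, and compared by ID and generation stamp.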


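Replica lookup in what appears to be SimulatedFSDataset, the in-memory FsDatasetSpi implementation used by HDFS tests (the next two snippets come from the same class). The per-block-pool map is keyed by Block, so a bare new Block(blockId) is enough to find the entry: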
  @Override
  @Deprecated
  public Replica getReplica(String bpid, long blockId) {
    final Map<Block, BInfo> map = blockMap.get(bpid);
    if (map != null) {
      return map.get(new Block(blockId));
    }
    return null;
  }
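A minimal, self-contained sketch (not taken from the sources above) of why that lookup works: Block's equals() and hashCode() are keyed on the block ID alone, so a Block built from just the ID matches an entry stored under a full (id, length, generation stamp) identity.

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.hdfs.protocol.Block;

  public class BlockKeyDemo {
    public static void main(String[] args) {
      Map<Block, String> map = new HashMap<Block, String>();
      // Store under the full identity: (blockId, numBytes, generationStamp).
      map.put(new Block(42L, 1024L, 1001L), "replica-info");
      // Look up with only the block ID; length and generation stamp are
      // ignored by Block's equals()/hashCode(), so this finds the entry.
      System.out.println(map.get(new Block(42L))); // prints "replica-info"
    }
  }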


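The same lookup, exposed as a synchronized accessor that renders the replica as a string for logging and debugging: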
  @Override
  public synchronized String getReplicaString(String bpid, long blockId) {
    Replica r = null;
    final Map<Block, BInfo> map = blockMap.get(bpid);
    if (map != null) {
      r = map.get(new Block(blockId));
    }
    return r == null ? "null" : r.toString();
  }

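getStoredBlock rebuilds a full Block from a lookup by ID alone, attaching the length and generation stamp recorded for the stored replica: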
  @Override // FsDatasetSpi
  public Block getStoredBlock(String bpid, long blkid) throws IOException {
    final Map<Block, BInfo> map = blockMap.get(bpid);
    if (map != null) {
      BInfo binfo = map.get(new Block(blkid));
      if (binfo == null) {
        return null;
      }
      return new Block(blkid, binfo.getNumBytes(), binfo.getGenerationStamp());
    }
    return null;
  }

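A test fragment (apparently built on MiniDFSCluster) that visits every datanode hosting a file's first located block, asks the dataset for the stored Block, resolves the on-disk block file, and verifies its contents: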
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
        ExtendedBlock blk = locatedblock.getBlock();
        Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
            blk.getBlockPoolId(), blk.getBlockId());
        final File blockfile = DataNodeTestUtils.getFile(datanode,
            blk.getBlockPoolId(), b.getBlockId());
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
          BufferedReader in = new BufferedReader(new FileReader(blockfile));
          assertEquals("something", in.readLine());
          in.close();
          successcount++;
        }
      }
      assertTrue(successcount > 0);

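A heartbeat-handling test: it queues more blocks for replication than a single heartbeat may carry, verifies that the resulting DNA_TRANSFER command is capped at MAX_REPLICATE_LIMIT, then queues invalidations and expects a second command in the next heartbeat response: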
      try {
        namesystem.writeLock();
        synchronized(hm) {
          for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
            dd.addBlockToBeReplicated(
                new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
                ONE_TARGET);
          }
          DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
              namesystem).getCommands();
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
          assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);

          ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
          for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
            blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
          }
          dd.addBlocksToBeInvalidated(blockList);
          cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)
              .getCommands();
          assertEquals(2, cmds.length);

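A block-recovery test in the same vein: a BlockInfoUnderConstruction is queued for recovery on dd1, and the heartbeat response is expected to carry a DNA_RECOVERBLOCK command whose recovery locations shrink as datanodes go stale (a node whose last update is older than the default 30-second stale interval is skipped):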
          final DatanodeStorageInfo[] storages = {
              dd1.getStorageInfos()[0],
              dd2.getStorageInfos()[0],
              dd3.getStorageInfos()[0]};
          BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
              new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
              BlockUCState.UNDER_RECOVERY, storages);
          dd1.addBlockToBeRecovered(blockInfo);
          DatanodeCommand[] cmds =
              NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
          BlockRecoveryCommand recoveryCommand = (BlockRecoveryCommand)cmds[0];
          assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
          DatanodeInfo[] recoveringNodes = recoveryCommand.getRecoveringBlocks()
              .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
          assertEquals(3, recoveringNodes.length);
          assertEquals(recoveringNodes[0], dd1);
          assertEquals(recoveringNodes[1], dd2);
          assertEquals(recoveringNodes[2], dd3);

          // Test with one stale node.
          dd1.setLastUpdate(System.currentTimeMillis());
          // More than the default stale interval of 30 seconds.
          dd2.setLastUpdate(System.currentTimeMillis() - 40 * 1000);
          dd3.setLastUpdate(System.currentTimeMillis());
          blockInfo = new BlockInfoUnderConstruction(
              new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
              BlockUCState.UNDER_RECOVERY, storages);
          dd1.addBlockToBeRecovered(blockInfo);
          cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());
          recoveryCommand = (BlockRecoveryCommand)cmds[0];
          assertEquals(1, recoveryCommand.getRecoveringBlocks().size());
          recoveringNodes = recoveryCommand.getRecoveringBlocks()
              .toArray(new BlockRecoveryCommand.RecoveringBlock[0])[0].getLocations();
          assertEquals(2, recoveringNodes.length);
          // dd2 is skipped.
          assertEquals(recoveringNodes[0], dd1);
          assertEquals(recoveringNodes[1], dd3);

          // Test with all stale nodes.
          dd1.setLastUpdate(System.currentTimeMillis() - 60 * 1000);
          // More than the default stale interval of 30 seconds.
          dd2.setLastUpdate(System.currentTimeMillis() - 40 * 1000);
          dd3.setLastUpdate(System.currentTimeMillis() - 80 * 1000);
          blockInfo = new BlockInfoUnderConstruction(
              new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP), 3,
              BlockUCState.UNDER_RECOVERY, storages);
          dd1.addBlockToBeRecovered(blockInfo);
          cmds = NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem).getCommands();
          assertEquals(1, cmds.length);
          assertEquals(DatanodeProtocol.DNA_RECOVERBLOCK, cmds[0].getAction());

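From the namesystem's block-allocation path (getAdditionalBlock): replication targets are chosen first, then a new Block is allocated and wired into the INode and the BlocksMap under the write lock: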
    final DatanodeStorageInfo[] targets = getBlockManager().chooseTarget(
        src, replication, clientNode, excludedNodes, blockSize, favoredNodes);

    // Part II.
    // Allocate a new block, add it to the INode and the BlocksMap.
    Block newBlock = null;
    long offset;
    checkOperation(OperationCategory.WRITE);
    waitForLoadingFSImage();
    writeLock();
    try {

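Earlier in the same path: safe mode and the configured fs-object limit are checked, and the client-supplied previous ExtendedBlock is narrowed to a plain Block before the file's INode is resolved: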
    checkNameNodeSafeMode("Cannot add block to " + src);

    // Have we exceeded the configured limit of fs objects?
    checkFsObjectLimit();

    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
    INode inode;
    if (fileId == INodeId.GRANDFATHER_INODE_ID) {
      // Older clients may not have given us an inode ID to work with.
      // In this case, we have to try to resolve the path and hope it
      // hasn't changed or been deleted since the file was opened for write.

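From the completeFile handling of retried RPCs (HDFS-3031): if the file is already closed but the client's view of the last block matches the actual last block by ID and generation stamp, the close is treated as a success: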
        // This could be a retry RPC - i.e. the client tried to close
        // the file, but missed the RPC response. Thus, it is trying
        // again to close the file. If the file still exists and
        // the client's view of the last block matches the actual
        // last block, then we'll treat it as a successful close.
        // See HDFS-3031.
        final Block realLastBlock = inode.asFile().getLastBlock();
        if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
          NameNode.stateChangeLog.info("DIR* completeFile: " +
              "request from " + holder + " to complete inode " + fileId +
              "(" + src + ") which is already closed. But, it appears to be " +
              "an RPC retry. Returning success");
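A small sketch (values invented for illustration) of the match used above: Block.matchingIdAndGenStamp compares only the block ID and the generation stamp, ignoring length, and treats two nulls as a match.

  import org.apache.hadoop.hdfs.protocol.Block;

  public class MatchDemo {
    public static void main(String[] args) {
      Block reported = new Block(7L, 0L, 1005L);    // length not yet known
      Block stored = new Block(7L, 4096L, 1005L);   // same ID and genstamp
      System.out.println(Block.matchingIdAndGenStamp(reported, stored)); // true
      System.out.println(Block.matchingIdAndGenStamp(
          reported, new Block(7L, 0L, 1006L)));     // false: genstamp differs
      System.out.println(Block.matchingIdAndGenStamp(null, null));       // true
    }
  }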

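Finally, the namesystem factory that mints new blocks: a fresh block ID, zero length, and a newly incremented generation stamp: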
  /**
   * Create a new block with a unique block id and a new generation stamp.
   */
  Block createNewBlock() throws IOException {
    assert hasWriteLock();
    Block b = new Block(nextBlockId(), 0, 0);
    // Increment the generation stamp for every new block.
    b.setGenerationStamp(nextGenerationStamp(false));
    return b;
  }
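For reference, a hypothetical sketch of what such a block looks like when printed (the values below are hardcoded for illustration; the real code draws them from the namesystem's counters). Block.toString() combines the blk_ file-name prefix, the block ID, and the generation stamp:

  // Hypothetical values standing in for nextBlockId()/nextGenerationStamp().
  Block b = new Block(1073741825L, 0, 0);
  b.setGenerationStamp(1001L);
  System.out.println(b); // prints something like blk_1073741825_1001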
