Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlock
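
A LocatedBlock pairs a block (its ID, generation stamp, and length) with the DatanodeInfo locations that hold its replicas, together with the block's starting offset within the file. The snippets below use it on the client side (locating, caching, and reading blocks) and on the server side (returning recovered blocks and reporting corrupt replicas). As a minimal sketch of the common client-side pattern, assuming an already-initialized DFSClient and a hypothetical file path (ListBlocks, src, and len are illustrative names, not taken from the snippets; getNamenode()/getBlockLocations() are the same calls the snippets issue):

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class ListBlocks {
  // Print every block of the file and the DataNodes holding its replicas.
  static void listBlocks(DFSClient dfsClient, String src, long len)
      throws IOException {
    // Same ClientProtocol call the test snippets below issue via RPC.
    LocatedBlocks blocks =
        dfsClient.getNamenode().getBlockLocations(src, 0, len);
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
      // Each LocatedBlock covers a byte range of the file ...
      System.out.println("block " + lb.getBlock()
          + " offset=" + lb.getStartOffset()
          + " size=" + lb.getBlockSize());
      // ... and lists the DataNodes holding its replicas.
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("  replica on " + dn);
      }
    }
  }
}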


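The first example, from a block-reader test, asks the NameNode's RPC server for the file's located blocks, takes the first LocatedBlock, and expects the first attempt to open a BlockReader on it to fail:
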
        try {
          // First time should fail.
          List<LocatedBlock> locatedBlocks =
              cluster.getNameNode().getRpcServer().getBlockLocations(
              TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
          LocatedBlock lblock = locatedBlocks.get(0); // first block
          BlockReader blockReader = null;
          try {
            blockReader = BlockReaderTestUtil.
                getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
            Assert.fail("expected getBlockReader to fail the first time.");
            // ...
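
In this concurrency test, the first LocatedBlock and a destination buffer are captured as finals so that a Runnable, gated by a semaphore, can read the block from a separate thread: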


    final Semaphore sem = new Semaphore(0);
    final List<LocatedBlock> locatedBlocks =
        cluster.getNameNode().getRpcServer().getBlockLocations(
            TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
    final LocatedBlock lblock = locatedBlocks.get(0); // first block
    final byte[] buf = new byte[TEST_FILE_LEN];
    Runnable readerRunnable = new Runnable() {
      @Override
      public void run() {
        try {
          // ...
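
A lease-recovery test: after creating and fully replicating a file, it fetches the last LocatedBlock, resolves each DatanodeInfo location to the corresponding DataNode in the mini-cluster, and then verifies the replica metadata on every node: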

      DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
      assertTrue(dfs.exists(filepath));
      DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

      // get block info for the last block
      LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
          dfs.dfs.getNamenode(), filestr);
      DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
      assertEquals(REPLICATION_NUM, datanodeinfos.length);

      // connect to the datanodes holding the replicas
      DataNode[] datanodes = new DataNode[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
        assertNotNull(datanodes[i]);
      }

      // verify block info on each replica
      ExtendedBlock lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      for(int i = 0; i < REPLICATION_NUM; i++) {
        checkMetaInfo(lastblock, datanodes[i]);
      }
      // ...
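
Inside DFSInputStream, getBlockAt() resolves an offset to a LocatedBlock: it first consults the client-side cache (locatedBlocks), falls back to a getLocatedBlocks() RPC and inserts the fetched range into the cache, and optionally advances the stream position to the resolved block: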

  /**
   * Return the LocatedBlock containing the given file offset, fetching and
   * caching more block locations from the NameNode when the cached range
   * does not cover it.
   */
  private LocatedBlock getBlockAt(long offset, boolean updatePosition,
      boolean throwWhenNotFound) throws IOException {
    assert (locatedBlocks != null) : "locatedBlocks is null";
    // search cached blocks first
    LocatedBlock blk = locatedBlocks.getBlockContainingOffset(offset);
    if (blk == null) { // block is not cached
      // fetch more blocks
      LocatedBlocks newBlocks = getLocatedBlocks(src, offset, prefetchSize);
      if (newBlocks == null) {
        if (!throwWhenNotFound) {
          return null;
        }
        throw new IOException("Could not find target position " + offset);
      }
      locatedBlocks.insertRange(newBlocks.getLocatedBlocks());
      locatedBlocks.setFileLength(newBlocks.getFileLength());
    }
    blk = locatedBlocks.getBlockContainingOffset(offset);
    if (blk == null) {
      if (!throwWhenNotFound) {
        return null;
      }
      throw new IOException("Failed to determine location for block at "
          + "offset=" + offset);
    }
    if (updatePosition) {
      // update current position
      this.pos = offset;
      this.blockEnd = blk.getStartOffset() + blk.getBlockSize() - 1;
      this.currentBlock = blk.getBlock();
      isCurrentBlockUnderConstruction = locatedBlocks
          .isUnderConstructionBlock(this.currentBlock);
    }
    return blk;
  }
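
Also from DFSInputStream: the block-range loop walks a byte range of the file, taking each covering LocatedBlock from the cache (fetching from the NameNode on a miss) and accumulating the blocks into blockRange until the requested length is exhausted: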

                     ". Aborting...";
        DFSClient.LOG.warn(msg);
        throw new IOException(msg);
      }

      LocatedBlock blk = locatedBlocks.getBlockContainingOffset(curOff);
      if (blk == null) {
        LocatedBlocks newBlocks = getLocatedBlocks(src, curOff, remaining);
        if (newBlocks == null) {
          throw new IOException("Could not get block locations for curOff=" +
              curOff + ", remaining=" + remaining + " (offset=" + offset +
              ")");
        }
        locatedBlocks.insertRange(newBlocks.getLocatedBlocks());
        continue;
      }

      blockRange.add(blk);
      // bytes of the requested range covered by this block,
      // from curOff to the block's end
      long bytesRead = blk.getStartOffset() + blk.getBlockSize() - curOff;
      remaining -= bytesRead;
      curOff += bytesRead;
    }

    DFSClient.checkBlockRange(blockRange, offset, length);
    // ...
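
The core of DFSInputStream.blockSeekTo(): it resolves the target offset to a LocatedBlock, then connects to the best DataNode for that block, preferring a short-circuit local read through BlockReaderLocal when the chosen replica is on the local host, and otherwise opening a socket for a remote BlockReader: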

    // ...
    }

    //
    // Compute desired block.
    //
    LocatedBlock targetBlock = getBlockAt(target, true, throwWhenNotFound);
    // Given target <= fileLength: getBlockAt() returns null if and only if
    // allowSeektoEnd is true and there is no block for the file yet; in that
    // case we simply return null.
    //
    if (targetBlock == null) {
      assert target == 0;
      return null;
    }
    assert (target == this.pos) : "Wrong position " + pos + " expect " + target;
    long offsetIntoBlock = target - targetBlock.getStartOffset();

    //
    // Connect to best DataNode for desired Block, with potential offset
    //
    DatanodeInfo chosenNode = null;
    while (s == null) {
      DNAddrPair retval = chooseDataNode(targetBlock);
      chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;

      // try reading the block locally. if this fails, then go via
      // the datanode
      Block blk = targetBlock.getBlock();
      try {
        if (DFSClient.LOG.isDebugEnabled()) {
          DFSClient.LOG.debug("blockSeekTo shortCircuitLocalReads "
                   + dfsClient.shortCircuitLocalReads +
                   " localhost " + dfsClient.localHost +
                   " targetAddr " + targetAddr);
        }
        if (dfsClient.shortCircuitLocalReads && dfsClient.localHost != null &&
            (targetAddr.equals(dfsClient.localHost) ||
             targetAddr.getHostName().startsWith("localhost"))) {
          blockReader = BlockReaderLocal.newBlockReader(dfsClient.conf, src,
                                                 namespaceId, blk,
                                                 chosenNode,
                                                 offsetIntoBlock,
                                                 blk.getNumBytes() - offsetIntoBlock,
                                                 dfsClient.metrics,
                                                 this.verifyChecksum,
                                                 this.clearOsBuffer);
          blockReader.setReadLocal(true);
          blockReader.setFsStats(dfsClient.stats);
          return chosenNode;
        }
      } catch (IOException ex) {
        DFSClient.LOG.info("Failed to read block " + targetBlock.getBlock() +
                 " on local machine " + dfsClient.localHost +
                 ". Try via the datanode on " + targetAddr + ":"
                  + StringUtils.stringifyException(ex));
      }

      try {
        s = dfsClient.socketFactory.createSocket();
        NetUtils.connect(s, targetAddr, dfsClient.socketTimeout,
            dfsClient.ipTosValue);
        s.setSoTimeout(dfsClient.socketTimeout);

        long minReadSpeedBps = (dfsClient.numNodeLeft(targetBlock.getLocations(),
            deadNodes) > 1) ? dfsClient.minReadSpeedBps : -1;
        blockReader = BlockReader.newBlockReader(
            dfsClient.getDataTransferProtocolVersion(), namespaceId,
            s, src, blk.getBlockId(),
            blk.getGenerationStamp(),
            // ...
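
When a file is under construction, the NameNode may not yet know the length of the last block, so DFSInputStream fetches it directly from a DataNode and patches it into the LocatedBlocks before caching them: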

    // ...
    }

    // if the file is under construction, then fetch size of last block
    // from datanode.
    if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
      LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
      if (last.getLocations().length > 0) {
        try {
          Block newBlock = getBlockInfo(last);
          // only if the block has data (not null)
          if (newBlock != null) {
            long newBlockSize = newBlock.getNumBytes();
            newInfo.setLastBlockSize(newBlock.getBlockId(), newBlockSize);
          }
        } catch (IOException e) {
          DFSClient.LOG.debug("DFSClient file " + src +
                    " is being concurrently appended to," +
                    " but the datanodes probably do not have block " +
                    last.getBlock(), e);
        }
      }
    }
    this.locatedBlocks = new DFSLocatedBlocks(newInfo);
    this.currentNode = null;
    // ...
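
On the DataNode: before replicating a finalized block, the node cross-checks the length the NameNode recorded against the on-disk length, and on a mismatch reports the replica as corrupt by wrapping it in a LocatedBlock for reportBadBlocks():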

    // Check if NN recorded length matches on-disk length
    long onDiskLength = data.getFinalizedBlockLength(namespaceId, block);
    if (block.getNumBytes() > onDiskLength) {
      // A shorter on-disk length indicates corruption, so report the
      // corrupt block to the NameNode.
      nn.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(block,
          new DatanodeInfo[] { new DatanodeInfo(nsReg) }) });
      LOG.info("Can't replicate block " + block + " because on-disk length "
          + onDiskLength + " is shorter than NameNode recorded length "
          + block.getNumBytes());
      return;
      // ...
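
In block recovery, once the replicas in syncList have been synchronized, the surviving DataNodes are packed into a DatanodeInfo array and handed back as a new LocatedBlock for the recovered block: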

        // ...
            nlist);
        DatanodeInfo[] info = new DatanodeInfo[nlist.length];
        for (int i = 0; i < nlist.length; i++) {
          info[i] = new DatanodeInfo(nlist[i]);
        }
        return new LocatedBlock(newblock, info); // success
      }

      // failed
      StringBuilder b = new StringBuilder();
      for(BlockRecord r : syncList) {
      // ...
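
Finally, in BlockReceiver: when streamed data from an upstream DataNode fails checksum verification, the receiver builds a LocatedBlock naming the suspect source replica and reports it to the NameNode: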

      if (!checksum.compare(checksumBuf, checksumOff)) {
        if (srcDataNode != null) {
          try {
            LOG.info("report corrupt block " + block + " from datanode " +
                      srcDataNode + " to namenode");
            LocatedBlock lb = new LocatedBlock(block,
                                            new DatanodeInfo[] {srcDataNode});
            datanode.reportBadBlocks(namespaceId, new LocatedBlock[] {lb});
          } catch (IOException e) {
            LOG.warn("Failed to report bad block " + block +
                      " from datanode " + srcDataNode + " to namenode");
          // ...
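
The reporting pattern in the last two snippets reduces to a few lines. A minimal sketch, assuming the caller already holds a ClientProtocol handle to the NameNode plus the suspect Block and DatanodeInfo from context (CorruptReplicaReporter and reportCorruptReplica are illustrative names; in this pre-federation API the LocatedBlock constructor takes a Block, while newer versions take an ExtendedBlock):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class CorruptReplicaReporter {
  // Report a single corrupt replica to the NameNode.
  static void reportCorruptReplica(ClientProtocol namenode, Block block,
      DatanodeInfo replica) throws IOException {
    // Wrap the block and its suspect location in a LocatedBlock; the RPC
    // takes an array, so several corrupt replicas can be reported at once.
    LocatedBlock lb = new LocatedBlock(block, new DatanodeInfo[] { replica });
    namenode.reportBadBlocks(new LocatedBlock[] { lb });
  }
}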
