Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeInfo
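
DatanodeInfo extends DatanodeID with the status a datanode reports to the namenode (capacity, remaining space, last update, admin state). Client code most often encounters it as the location array of a LocatedBlock, as the excerpts below show. A minimal sketch of that pattern, assuming the List<LocatedBlock> came from a namenode call such as ClientProtocol#getBlockLocations(...).getLocatedBlocks() (the helper name is ours):

    import java.util.List;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    // Print every replica location of every block.
    static void printReplicaLocations(List<LocatedBlock> blocks) {
      for (LocatedBlock lb : blocks) {
        for (DatanodeInfo dn : lb.getLocations()) {
          System.out.println(lb.getBlock() + " -> " + dn.getHostName());
        }
      }
    }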


    // From a test: corrupt replicas of the first block by mapping each
    // DatanodeInfo location to its DataNode in the mini-cluster.
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // Corrupt some or all of the block replicas.
    for (int i = 0; i < corruptBlockCount; i++) {
      DatanodeInfo dninfo = datanodeinfos[i];
      final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
      corruptBlock(block, dn);
      LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
          + dninfo);
    }
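The corruptBlock(block, dn) helper is elided from the excerpt above. A hypothetical stand-in (locating the replica file on disk is cluster-specific and assumed to be done elsewhere) just flips bits so a later read fails checksum verification:

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    // Hypothetical corruptBlock stand-in: complement the first byte of the
    // replica file so its checksum no longer matches.
    static void corruptReplicaFile(File blockFile) throws IOException {
      try (RandomAccessFile raf = new RandomAccessFile(blockFile, "rw")) {
        int first = raf.read();   // current first byte
        raf.seek(0);
        raf.write(first ^ 0xff);  // write back its bitwise complement
      }
    }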


      // The receiver of this call is elided in the excerpt; lb holds the
      // LocatedBlocks returned for "/tmp/x" by the namenode.
          .getBlockLocations("/tmp/x", 0, 16);
      // Create a new block object, because the block inside LocatedBlock at
      // namenode is of type BlockInfo.
      ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
      Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
      final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
      ClientDatanodeProtocol proxy =
          DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
      try {
        proxy.getBlockLocalPathInfo(blk, token);
        Assert.fail("The call should have failed as this user "
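For contrast with the failure case above: when the caller is allowed (listed in dfs.block.local-path-access.user), the same RPC returns the replica's on-disk paths. A sketch of the success path, using the BlockLocalPathInfo accessors (the wrapper method is ours):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
    import org.apache.hadoop.security.token.Token;

    // Ask the datanode for the local paths of a replica it stores.
    static void printLocalPaths(ClientDatanodeProtocol proxy,
        ExtendedBlock blk, Token<BlockTokenIdentifier> token)
        throws IOException {
      BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
      System.out.println("replica file: " + info.getBlockPath());
      System.out.println("meta file: " + info.getMetaPath());
    }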

  /**
   * Test conversion of LocatedBlock to BlockLocation
   */
  @Test
  public void testLocatedBlocks2Locations() {
    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;

    // a block with valid (non-corrupt) locations
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
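The conversion under test is DFSUtil.locatedBlocks2Locations, which flattens each LocatedBlock's DatanodeInfo[] into the host arrays of a BlockLocation. A sketch of using it directly (the wrapper method is ours):

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    // Convert namenode block locations into the client-facing
    // BlockLocation form and print the host list for each block.
    static void printAsBlockLocations(LocatedBlocks lbs) throws IOException {
      for (BlockLocation loc : DFSUtil.locatedBlocks2Locations(lbs)) {
        System.out.println(loc.getOffset() + " len=" + loc.getLength()
            + " hosts=" + Arrays.toString(loc.getHosts()));
      }
    }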

  /**
   * Test constructing LocatedBlock with null cachedLocs
   */
  @Test
  public void testLocatedBlockConstructorWithNullCachedLocs() {
    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;
   
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);
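The excerpt stops before the assertions, but the point of passing null for cachedLocs is presumably that callers never see the null, along these lines (our assumption about the elided lines):

    // Assumed continuation: a null cachedLocs argument should surface to
    // callers of getCachedLocations() as an empty array, never as null.
    Assert.assertNotNull(l1.getCachedLocations());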

      }
    });
    // Get a domain socket peer to the datanode, then allocate the first
    // short-circuit shared-memory slot for block 123 in block pool "xyz".
    DomainPeer peer = getDomainPeerToDn(conf);
    MutableBoolean usedPeer = new MutableBoolean(false);
    ExtendedBlockId blockId = new ExtendedBlockId(123, "xyz");
    final DatanodeInfo datanode =
        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
    // Allocating the first shm slot requires using up a peer.
    Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
                    blockId, "testAllocShm_client");
    Assert.assertNotNull(slot);
    Assert.assertTrue(usedPeer.booleanValue());
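Design note: allocating the first slot "uses up" the peer because the shared-memory segment backing the slots must be negotiated with the datanode over that domain socket; later slots can be carved out of the existing segment without consuming another peer.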

    // Verify the file contents, then inspect the client's short-circuit cache.
    byte[] expected =
        DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache =
        fs.dfs.getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode =
        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
    cache.getDfsClientShmManager().visit(new Visitor() {
      @Override
      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
          throws IOException {
        Assert.assertEquals(1, info.size());

      // Namenode side: snapshot the DatanodeManager's report list into a
      // fresh DatanodeInfo[] while holding the read lock.
      final DatanodeManager dm = getBlockManager().getDatanodeManager();
      final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

      DatanodeInfo[] arr = new DatanodeInfo[results.size()];
      for (int i=0; i<arr.length; i++) {
        arr[i] = new DatanodeInfo(results.get(i));
      }
      return arr;
    } finally {
      readUnlock();
    }
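On the client side, an array like the one built above is what DistributedFileSystem#getDataNodeStats() returns. A sketch of consuming it (the printing loop is ours):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    // Dump capacity and remaining space for every datanode in the report.
    static void printDatanodes(DistributedFileSystem dfs) throws IOException {
      for (DatanodeInfo dn : dfs.getDataNodeStats()) {
        System.out.println(dn.getHostName() + " capacity=" + dn.getCapacity()
            + " remaining=" + dn.getRemaining());
      }
    }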

    long offsetIntoBlock = target - targetBlock.getStartOffset();

    //
    // Connect to best DataNode for desired Block, with potential offset
    //
    DatanodeInfo chosenNode = null;
    while (s == null) {
      DNAddrPair retval = chooseDataNode(targetBlock);
      chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;

  /** Choose a datanode for the block, skipping nodes already marked dead. */
  private DNAddrPair chooseDataNode(LocatedBlock block)
    throws IOException {
    while (true) {
      DatanodeInfo[] nodes = block.getLocations();
      DatanodeInfo chosenNode = null;
      try {
        chosenNode = dfsClient.bestNode(nodes, deadNodes);
        InetSocketAddress targetAddr =
                          NetUtils.createSocketAddr(chosenNode.getName());
        return new DNAddrPair(chosenNode, targetAddr);
      } catch (IOException ie) {
        int failureTimes = DFSClient.dfsInputStreamfailures.get();
        String blockInfo = block.getBlock() + " file=" + src;
        if (failureTimes >= dfsClient.maxBlockAcquireFailures
            || failureTimes >= block.getLocations().length) {
          throw new BlockMissingException(src, "Could not obtain block: " +
              blockInfo, block.getStartOffset());
        }

        if (nodes == null || nodes.length == 0) {
          DFSClient.LOG.info("No node available for block: " + blockInfo);
        }
        DFSClient.LOG.info("Could not obtain block " + block.getBlock() +
                 " from node:  " +
                 (chosenNode == null ? "" : chosenNode.getHostName()) + ie +
                 ". Will get new block locations from namenode and retry...");      
        try {
          // Introduce a random factor into the wait time before another retry.
          // The wait time depends on the number of failures and a random factor.
          // At the first time of getting a BlockMissingException, the wait time
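The truncated comment describes a wait that scales with the failure count and adds a random component. A sketch of that idea (the 3-second base window is our assumption, not a value from the excerpt):

    import java.util.Random;

    // Retry wait grows linearly with the failure count, plus a random
    // component so concurrent clients do not retry in lockstep.
    static long retryWaitMillis(int failures, Random rand) {
      final long timeWindowMs = 3000; // hypothetical base window
      return timeWindowMs * failures
          + (long) (timeWindowMs * (failures + 1) * rand.nextDouble());
    }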

      // Cached block locations may have been updated by chooseDataNode()
      // or fetchBlockAt(). Always get the latest list of locations at the
      // start of the loop.
      block = getBlockAt(block.getStartOffset(), false, true);
      DNAddrPair retval = chooseDataNode(block);
      DatanodeInfo chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;
      BlockReader reader = null;
      int len = (int) (end - start + 1);

      try {
        if (DFSClient.LOG.isDebugEnabled()) {
          DFSClient.LOG.debug("fetchBlockByteRange shortCircuitLocalReads " +
              dfsClient.shortCircuitLocalReads +
              " localhost " + dfsClient.localHost +
              " targetAddr " + targetAddr);
        }
        // First try reading the block locally (short-circuit read).
        if (dfsClient.shortCircuitLocalReads &&
            NetUtils.isLocalAddressWithCaching(targetAddr.getAddress())) {
          reader = BlockReaderLocal.newBlockReader(dfsClient.conf, src,
              namespaceId, block.getBlock(), chosenNode, start, len,
              dfsClient.metrics, verifyChecksum, this.clearOsBuffer);
          reader.setReadLocal(true);
          reader.setFsStats(dfsClient.stats);
        } else {
          // Otherwise go to the datanode over a socket.
          dn = dfsClient.socketFactory.createSocket();
          NetUtils.connect(dn, targetAddr, dfsClient.socketTimeout,
              dfsClient.ipTosValue);
          dn.setSoTimeout(dfsClient.socketTimeout);
          reader = BlockReader.newBlockReader(
              dfsClient.getDataTransferProtocolVersion(), namespaceId,
              dn, src, block.getBlock().getBlockId(),
              block.getBlock().getGenerationStamp(), start, len, buffersize,
              verifyChecksum, dfsClient.clientName, dfsClient.minReadSpeedBps);
          boolean isLocalHost = NetUtils.isLocalAddress(targetAddr.getAddress());
          reader.setReadLocal(isLocalHost);
          if (!isLocalHost) {
            reader.setReadRackLocal(
                dfsClient.isInLocalRack(targetAddr.getAddress()));
          }
          reader.setFsStats(dfsClient.stats);
        }
        int nread = reader.readAll(buf, offset, len);
        if (nread != len) {
          throw new IOException("truncated return from reader.read(): " +
              "expected " + len + ", got " + nread);
        }
        return;
      } catch (ChecksumException e) {
        DFSClient.LOG.warn("fetchBlockByteRange(). Got a checksum exception for " +
                 src + " at " + block.getBlock() + ":" +
                 e.getPos() + " from " + chosenNode.getName());
        dfsClient.reportChecksumFailure(src, block.getBlock(), chosenNode);
      } catch (IOException e) {
        DFSClient.LOG.warn("Failed to connect to " + targetAddr +
                 " for file " + src +
                 " for block " + block.getBlock().getBlockId() + ":"  +
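Taken together, fetchBlockByteRange prefers an in-process BlockReaderLocal when short-circuit reads are enabled and the chosen DatanodeInfo resolves to a local address, and otherwise opens a socket to the datanode. Either way the reader is tagged through setReadLocal/setReadRackLocal so client statistics can distinguish local, rack-local, and remote reads, and checksum failures are reported back via reportChecksumFailure.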
