Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeInfo
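DatanodeInfo describes a datanode as seen by the namenode and by DFS clients; the snippets below read fields such as getName() (host:port), getHost(), getStorageID() and getLastUpdate(). Besides block locations, a common way to obtain DatanodeInfo objects is a datanode report. The following is a minimal sketch, assuming a reachable HDFS cluster and a default Configuration; the class name is illustrative, and the import path of DatanodeReportType differs between Hadoop releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType; // older releases; newer ones relocate this enum

public class ListLiveDatanodes {
  public static void main(String[] args) throws Exception {
    // Ask the namenode for all datanodes it currently considers live.
    DFSClient client = new DFSClient(new Configuration());
    try {
      DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
      for (DatanodeInfo node : live) {
        System.out.println(node.getName()             // host:port, as used in block locations
            + " host=" + node.getHost()
            + " storageID=" + node.getStorageID()
            + " lastUpdate=" + node.getLastUpdate()); // time of the last heartbeat, in ms
      }
    } finally {
      client.close();
    }
  }
}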


    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
      } catch (IOException ie) {
        if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node:  " + ie);


  private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
                                TreeSet<DatanodeInfo> deadNodes) throws IOException {
    if ((nodes == null) ||
        (nodes.length - deadNodes.size() < 1)) {
      throw new IOException("No live nodes contain current block");
    }
    DatanodeInfo chosenNode;
    do {
      chosenNode = nodes[r.nextInt(nodes.length)];
    } while (deadNodes.contains(chosenNode));
    return chosenNode;
  }

      LocatedBlocksWithMetaInfo blocks = namenode.openAndFetchMetaInfo(
          path.toString(), 0, status.getLen());
      Assert.assertEquals(1, blocks.getLocatedBlocks().size());
      LocatedBlock block = blocks.getLocatedBlocks().get(0);
      Assert.assertEquals(1, block.getLocations().length);
      DatanodeInfo source = block.getLocations()[0];
      Set<DatanodeInfo> excluded = new HashSet<DatanodeInfo>();
      for (DatanodeInfo d : datanodes) {
        excluded.add(d);
      }
      excluded.remove(source);
      DatanodeInfo target = excluded.iterator().next();
      excluded.add(source);
      excluded.remove(target);
      BlockMover.BlockMoveAction action =
          blockMover.new BlockMoveAction(block, source, excluded, 1,
              blocks.getDataProtocolVersion(), blocks.getNamespaceID());

      BlockAndDatanodeResolver resolver =
          new BlockAndDatanodeResolver(src, fs, parity, fs);
      for (int i = 0; i < srcInfos.size(); ++i) {
        LocatedBlock lb = resolver.getLocatedBlock(srcInfos.get(i));
        Assert.assertEquals(srcLbs.get(i).getBlock(), lb.getBlock());
        for (String nodeName : srcInfos.get(i).getNames()) {
          DatanodeInfo node = resolver.getDatanodeInfo(nodeName);
          Assert.assertEquals(node.getName(), nodeName);
        }
      }
      for (int i = 0; i < parityInfos.size(); ++i) {
        LocatedBlock lb = resolver.getLocatedBlock(parityInfos.get(i));
        Assert.assertEquals(parityLbs.get(i).getBlock(), lb.getBlock());
        for (String nodeName : parityInfos.get(i).getNames()) {
          DatanodeInfo node = resolver.getDatanodeInfo(nodeName);
          Assert.assertEquals(node.getName(), nodeName);
        }
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();

      for (int i = 0; i < 3; ++i) {
        excluded.add(datanodes[i]);
      }
      final int NUM_TESTS = 10;
      for (int i = 0; i < NUM_TESTS;) {
        DatanodeInfo target = blockMover.cluster.getRandomNode(excluded);
        if (target == null) {
          continue;
        }
        Assert.assertFalse(excluded.contains(target));
        ++i;

  public void testCheckBlockLocations() throws IOException {
    setupCluster();
    try {
      FakeExecutorService fakeBlockMover = new FakeExecutorService();
      blockMover.executor = fakeBlockMover;
      DatanodeInfo sourceLocations[] = new DatanodeInfo[] {
          datanodes[0], datanodes[1], datanodes[2], // good
          datanodes[0], datanodes[1], datanodes[1], // bad 1==1
          datanodes[0], datanodes[1], datanodes[2], // good
          datanodes[0], datanodes[1], datanodes[2], // bad 0==0 with parity
          datanodes[0], datanodes[1], // bad 0==0 with parity
      };
      DatanodeInfo parityLocations[] = new DatanodeInfo[] {
          datanodes[3], datanodes[4], // good
          datanodes[3], datanodes[4], // good
          datanodes[3], datanodes[3], // bad 3==3
          datanodes[0], datanodes[4], // bad 0==0 with parity
          datanodes[0], datanodes[0], // bad 0==0 with parity

  private BlockInfo createBlockInfo(Path file, LocatedBlock b) {
    DatanodeInfo[] locations = b.getLocations();
    String[] hosts = new String[locations.length];
    String[] names = new String[locations.length];
    for (int i = 0; i < locations.length; ++i) {
      DatanodeInfo d = locations[i];
      hosts[i] = d.getHost();
      names[i] = d.getName();
    }
   
    BlockLocation loc = new BlockLocation(
        names, hosts, b.getStartOffset(), b.getBlockSize());
    return new BlockInfo(loc, file);
  }
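The BlockLocation built above carries the copied DatanodeInfo fields into the generic FileSystem API. A brief sketch of the read side, following the variable names of the snippet (fragment only; in older releases getHosts() and getNames() declare IOException):

    // Reading the copied fields back from the BlockLocation built above.
    String[] blockHosts = loc.getHosts();   // from DatanodeInfo.getHost()
    String[] blockNames = loc.getNames();   // from DatanodeInfo.getName(), i.e. host:port
    long offset = loc.getOffset();          // b.getStartOffset()
    long length = loc.getLength();          // b.getBlockSize()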

                                                   getNameNodePort(nnIndex));
    DFSClient client = new DFSClient(addr, nameNodes[nnIndex].conf);
    int namespaceId = getNameNode(nnIndex).getNamespaceID();
    long startTime = System.currentTimeMillis();
    while (System.currentTimeMillis() < startTime + timeoutMillis) {
      DatanodeInfo report[] = client.datanodeReport(DatanodeReportType.LIVE);
      for (DatanodeInfo thisReport : report) {
        if (thisReport.getStorageID().equals(
              dn.getDNRegistrationForNS(namespaceId).getStorageID())) {
          if (thisReport.getLastUpdate() > startTime)

      out.writeLong(block.getGenerationStamp());
      out.writeInt(0);           // no pipelining
      out.writeBoolean(false);   // not part of recovery
      Text.writeString(out, ""); // client
      out.writeBoolean(true); // sending src node information
      DatanodeInfo srcNode = new DatanodeInfo();
      srcNode.write(out); // Write src node DatanodeInfo
      // write targets
      out.writeInt(0); // num targets
      // send data & checksum
      blockSender.sendBlock(out, baseStream, null, progress);
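
DatanodeInfo is a Writable, so the srcNode.write(out) call above has a matching read path on the receiving side. A minimal sketch, assuming a DataInput 'in' positioned at the flag written by out.writeBoolean(true):

      // Read side of the header written above (sketch).
      boolean hasSrcNode = in.readBoolean();   // matches out.writeBoolean(true)
      if (hasSrcNode) {
        DatanodeInfo srcNode = new DatanodeInfo();
        srcNode.readFields(in);                // Writable counterpart of srcNode.write(out)
      }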

      namenode.commitBlockSynchronization(block,
          newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false,
          nlist);
      DatanodeInfo[] info = new DatanodeInfo[nlist.length];
      for (int i = 0; i < nlist.length; i++) {
        info[i] = new DatanodeInfo(nlist[i]);
      }
      return new LocatedBlock(newblock, info); // success
    }

    //failed
