Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeInfo

DatanodeInfo extends DatanodeID with the status a namenode tracks for each datanode (capacity, usage, last contact, admin state). It implements Writable, which is why the excerpts below routinely construct an empty instance and populate it via readFields. The excerpts appear to be drawn from a 0.20-era Hadoop code base.


      // get blocks of size -1 from dataNodes[0]
      getBlocksWithException(namenode, dataNodes[0], -1);

      // get blocks from a non-existent datanode (freshly constructed, never registered)
      getBlocksWithException(namenode, new DatanodeInfo(), 2);
    } finally {
      cluster.shutdown();
    }
  }
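The namenode, dataNodes, and cluster handles come from the test fixture, which this excerpt omits. A minimal sketch of such a fixture, assuming the 0.20-era MiniDFSCluster test API (constructor arguments here are illustrative):

      // Hypothetical fixture: an in-process HDFS cluster with two datanodes.
      Configuration conf = new Configuration();
      MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
      try {
        cluster.waitActive();
        DatanodeInfo[] dataNodes =
            ((DistributedFileSystem) cluster.getFileSystem()).getDataNodeStats();
        // ... obtain a NamenodeProtocol proxy, then run the calls above ...
      } finally {
        cluster.shutdown();
      }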


  private void getBlocksWithException(NamenodeProtocol namenode,
                                      DatanodeInfo datanode,
                                      long size) throws IOException {
    boolean getException = false;
    try {
      // The invalid argument should surface as a server-side RemoteException.
      namenode.getBlocks(datanode, size);
    } catch (RemoteException e) {
      getException = true;
      assertTrue(e.getMessage().contains("IllegalArgumentException"));
    }
    assertTrue(getException);
  }
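For contrast with the error path, a sketch of a legal getBlocks call as the Balancer issues it; the BlocksWithLocations return type and its accessors are assumed from the 0.20-era NamenodeProtocol:

      // Hypothetical happy path: ask for up to 50 MB worth of blocks on one datanode.
      BlocksWithLocations blocks = namenode.getBlocks(dataNodes[0], 50 * 1024 * 1024);
      for (BlocksWithLocations.BlockWithLocations b : blocks.getBlocks()) {
        System.out.println(b.getBlock() + " replicated on "
            + java.util.Arrays.toString(b.getDatanodes()));
      }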

  /**
   * Write a block to disk.
   *
   * @param in The stream to read from
   * @throws IOException
   */
  private void writeBlock(DataInputStream in) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() +
              " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    Block block = new Block(in.readLong(),
        dataXceiverServer.estimateBlockSize, in.readLong());
    LOG.info("Receiving block " + block +
             " src: " + remoteAddress +
             " dest: " + localAddress);
    int pipelineSize = in.readInt(); // num of datanodes in entire pipeline
    boolean isRecovery = in.readBoolean(); // is this part of recovery?
    String client = Text.readString(in); // working on behalf of this client
    boolean hasSrcDataNode = in.readBoolean(); // is src node info present
    if (hasSrcDataNode) {
      srcDataNode = new DatanodeInfo();
      srcDataNode.readFields(in);
    }
    int numTargets = in.readInt();
    if (numTargets < 0) {
      throw new IOException("Mislabelled incoming datastream.");
    }
    DatanodeInfo[] targets = new DatanodeInfo[numTargets];
    for (int i = 0; i < targets.length; i++) {
      DatanodeInfo tmp = new DatanodeInfo();
      tmp.readFields(in);
      targets[i] = tmp;
    }

    DataOutputStream mirrorOut = null;  // stream to next target
    DataInputStream mirrorIn = null;    // reply from next target
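srcDataNode and each target are rebuilt by default-constructing a DatanodeInfo and calling readFields, the standard Hadoop Writable pattern. A self-contained round-trip sketch (the source DatanodeInfo would come from a registration or block report in practice):

    import java.io.*;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

    // Serialize a DatanodeInfo, then rebuild it the way writeBlock does.
    public static DatanodeInfo roundTrip(DatanodeInfo src) throws IOException {
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      src.write(new DataOutputStream(bos));            // Writable#write

      DatanodeInfo copy = new DatanodeInfo();          // empty shell...
      copy.readFields(new DataInputStream(
          new ByteArrayInputStream(bos.toByteArray()))); // ...filled from the stream
      return copy;
    }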

    /* read header */
    long blockId = in.readLong();
    Block block = new Block(blockId, dataXceiverServer.estimateBlockSize,
        in.readLong()); // block id & generation stamp
    String sourceID = Text.readString(in); // read del hint
    DatanodeInfo proxySource = new DatanodeInfo(); // read proxy source
    proxySource.readFields(in);

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
      LOG.warn("Not able to receive block " + blockId + " from "
          + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
      sendResponse(s, (short)DataTransferProtocol.OP_STATUS_ERROR,
          datanode.socketWriteTimeout);
      return;
    }

    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    short opStatus = DataTransferProtocol.OP_STATUS_SUCCESS;
    BlockReceiver blockReceiver = null;
    DataInputStream proxyReply = null;
   
    try {
      // get the output stream to the proxy
      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(
          proxySource.getName());
      proxySock = datanode.newSocket();
      proxySock.connect(proxyAddr, datanode.socketTimeout);
      proxySock.setSoTimeout(datanode.socketTimeout);

      OutputStream baseStream = NetUtils.getOutputStream(proxySock,
          datanode.socketWriteTimeout); // assumed completion: the write timeout used above
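A failed balanceThrottler.acquire() is the refusal path; when it succeeds, the thread-quota slot must be returned. A sketch of the acquire/release discipline the full method follows (structure assumed from the snippet; the copy itself is elided):

      // Quota discipline around a balancing transfer.
      if (!dataXceiverServer.balanceThrottler.acquire()) {
        sendResponse(s, (short) DataTransferProtocol.OP_STATUS_ERROR,
            datanode.socketWriteTimeout);
        return;                                  // refused: too many concurrent moves
      }
      try {
        // ... connect to proxySource, stream the block, reply with OP_STATUS_SUCCESS ...
      } finally {
        dataXceiverServer.balanceThrottler.release(); // always return the quota slot
      }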

    DistributedFileSystem dfs = (DistributedFileSystem) filesys;
    boolean done = false;
    boolean foundNode = false;
    DatanodeInfo[] datanodes = dfs.getDataNodeStats();
    for (int i = 0; i < datanodes.length; i++) {
      DatanodeInfo dn = datanodes[i];
      if (dn.getName().equals(node)) {
        if (state == NodeState.DECOMMISSIONED) {
          done = dn.isDecommissioned();
        } else if (state == NodeState.DECOMMISSION_INPROGRESS) {
          done = dn.isDecommissionInProgress();
        } else {
          done = (!dn.isDecommissionInProgress() && !dn.isDecommissioned());
        }
        System.out.println(dn.getDatanodeReport());
        foundNode = true;
      }
    }
    if (!foundNode) {
      throw new IOException("Could not find node: " + node);
    }
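The same accessors support a polling wait; a minimal hypothetical helper built on them (the one-second interval is illustrative):

    // Hypothetical helper: block until the named datanode reports DECOMMISSIONED.
    private void waitDecommissioned(DistributedFileSystem dfs, String node)
        throws IOException, InterruptedException {
      while (true) {
        for (DatanodeInfo dn : dfs.getDataNodeStats()) {
          if (dn.getName().equals(node) && dn.isDecommissioned()) {
            return;
          }
        }
        Thread.sleep(1000); // re-poll the datanode report once a second
      }
    }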

    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
      } catch (IOException ie) {
        if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
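The excerpt cuts off inside the catch block; a sketch of how such a retry loop typically continues (the back-off length and the deadNodes reset are assumptions, not excerpted code):

        // Hypothetical continuation of the catch block: back off, forget the
        // dead nodes (they may have recovered), and retry the outer while loop.
        try {
          Thread.sleep(10000);
        } catch (InterruptedException ignored) {
        }
        deadNodes.clear();
        failures++;
        continue;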

  private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
                                TreeSet<DatanodeInfo> deadNodes) throws IOException {
    if ((nodes == null) ||
        (nodes.length - deadNodes.size() < 1)) {
      throw new IOException("No live nodes contain current block");
    }
    DatanodeInfo chosenNode;
    do {
      // r is a shared java.util.Random; uniform choice spreads read load
      chosenNode = nodes[r.nextInt(nodes.length)];
    } while (deadNodes.contains(chosenNode));
    return chosenNode;
  }
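A hypothetical caller shows why deadNodes is a parameter: each failed read shrinks the candidate set until the guard above throws.

    // Hypothetical read loop over bestNode: failed replicas are retired per block.
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    while (true) {
      DatanodeInfo target = bestNode(dfs, lblock.getLocations(), deadNodes);
      try {
        readBlockFrom(target);        // hypothetical read attempt
        break;                        // success
      } catch (IOException e) {
        deadNodes.add(target);        // never retry this replica for this block
      }
    }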

      int numTargets = xferTargets[i].length;
      if (numTargets > 0) {
        if (LOG.isInfoEnabled()) {
          StringBuilder xfersBuilder = new StringBuilder();
          for (int j = 0; j < numTargets; j++) {
            DatanodeInfo nodeInfo = xferTargets[i][j];
            xfersBuilder.append(nodeInfo.getName());
            if (j < (numTargets - 1)) {
              xfersBuilder.append(", ");
            }
          }
          String xfersTo = xfersBuilder.toString();

      namenode.commitBlockSynchronization(block,
          newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false,
          nlist);
      DatanodeInfo[] info = new DatanodeInfo[nlist.length];
      for (int i = 0; i < nlist.length; i++) {
        info[i] = new DatanodeInfo(nlist[i]);
      }
      return new LocatedBlock(newblock, info); // success
    }

    //failed
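The DatanodeID-to-DatanodeInfo widening above exists because LocatedBlock carries full DatanodeInfo entries; a sketch of reading them back out (getLocations() and getBlock() are the standard LocatedBlock accessors):

      // Consume the LocatedBlock returned on successful sync: list replica homes.
      LocatedBlock lb = new LocatedBlock(newblock, info);
      for (DatanodeInfo loc : lb.getLocations()) {
        System.out.println(lb.getBlock() + " now lives on " + loc.getName());
      }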

        out = new DataOutputStream(new BufferedOutputStream(baseStream,
                                                            SMALL_BUFFER_SIZE));

        blockSender = new BlockSender(b, 0, -1, false, false, false,
            datanode);
        DatanodeInfo srcNode = new DatanodeInfo(dnRegistration);

        //
        // Header info
        //
        out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
        out.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
        out.writeLong(b.getBlockId());
        out.writeLong(b.getGenerationStamp());
        out.writeInt(0);           // no pipelining
        out.writeBoolean(false);   // not part of recovery
        Text.writeString(out, ""); // client
        out.writeBoolean(true); // sending src node information
        srcNode.write(out); // Write src node DatanodeInfo
        // write targets
        out.writeInt(targets.length - 1);
        for (int i = 1; i < targets.length; i++) {
          targets[i].write(out);
        }
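Each write above pairs with a read on the receiving datanode; a condensed sketch of the reader side, assuming (as the writeBlock excerpt earlier suggests) that the version short and opcode byte are consumed by the dispatching DataXceiver before the per-op parsing starts:

        // Reader-side mirror of the header written above; in is the receiver's stream.
        long blockId         = in.readLong();       // b.getBlockId()
        long generationStamp = in.readLong();       // b.getGenerationStamp()
        int pipelineSize     = in.readInt();        // 0: no pipelining
        boolean isRecovery   = in.readBoolean();    // false: not part of recovery
        String client        = Text.readString(in); // "" here
        boolean hasSrcNode   = in.readBoolean();    // true: DatanodeInfo follows
        DatanodeInfo src     = new DatanodeInfo();
        src.readFields(in);                         // counterpart of srcNode.write(out)
        int numTargets       = in.readInt();        // targets.length - 1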
