Package: org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DatanodeInfo


      // cached block locations may have been updated by chooseDataNode()
      // or fetchBlockAt(). Always get the latest list of locations at the
      // start of the loop.
      block = getBlockAt(block.getStartOffset(), false, true);
      DNAddrPair retval = chooseDataNode(block);
      DatanodeInfo chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;
      ByteBuffer result = null;
      BlockReaderLocal localReader = null;
      BlockReaderAccelerator remoteReader = null;

       try {
         if (DFSClient.LOG.isDebugEnabled()) {
           DFSClient.LOG.debug("fetchBlockByteRangeScatterGather " +
                    " localhst " + dfsClient.localHost +
                    " targetAddr " + targetAddr);
         }
        
         // first try reading the block locally.
         if (dfsClient.shortCircuitLocalReads &&
             NetUtils.isLocalAddressWithCaching(targetAddr.getAddress())) {
           localReader = BlockReaderLocal.newBlockReader(dfsClient.conf, src,
                                                namespaceId, block.getBlock(),
                                                chosenNode,
                                                start,
                                                len,
                                                dfsClient.metrics,
                                                verifyChecksum,
                                                this.clearOsBuffer);
           localReader.setReadLocal(true);
           localReader.setFsStats(dfsClient.stats);
           result = localReader.readAll();

         } else {
        
           // go to the datanode
           dn = dfsClient.socketFactory.createSocket();
           NetUtils.connect(dn, targetAddr, dfsClient.socketTimeout,
               dfsClient.ipTosValue);
           dn.setSoTimeout(dfsClient.socketTimeout);
           remoteReader = new BlockReaderAccelerator(dfsClient.conf,
                                          targetAddr,
                                          chosenNode,
                                          dfsClient.getDataTransferProtocolVersion(),
                                          namespaceId, dfsClient.clientName,
                                          dn, src,
                                          block,
                                          start, len,
                                          verifyChecksum, dfsClient.metrics);
           result = remoteReader.readAll();
          }
          if (result.remaining() != len) {
            throw new IOException("truncated return from reader.read(): " +
                                "expected " + len + ", got " +
                                  result.remaining());
          }
          if (NetUtils.isLocalAddress(targetAddr.getAddress())) {
            dfsClient.stats.incrementLocalBytesRead(len);
            dfsClient.stats.incrementRackLocalBytesRead(len);
          } else if (dfsClient.isInLocalRack(targetAddr.getAddress())) {
            dfsClient.stats.incrementRackLocalBytesRead(len);
          }

          return result;
      } catch (ChecksumException e) {
        DFSClient.LOG.warn("fetchBlockByteRangeScatterGather(). Got a checksum exception for " +
                 src + " at " + block.getBlock() + ":" +
                 e.getPos() + " from " + chosenNode.getName());
        dfsClient.reportChecksumFailure(src, block.getBlock(), chosenNode);
      } catch (IOException e) {
        DFSClient.LOG.warn("Failed to connect to " + targetAddr +
                 " for file " + src +
                 " for block " + block.getBlock().getBlockId() + ":"  +
View Full Code Here


   */
  public synchronized boolean seekToNewSource(long targetPos,
      boolean throwWhenNotFound) throws IOException {
    // Remember whether the current node was already marked dead, so we can
    // undo the temporary blacklisting below without clobbering real state.
    boolean markedDead = deadNodes.containsKey(currentNode);
    // Temporarily blacklist the current node so blockSeekTo() is forced to
    // choose a different datanode for this position, if one exists.
    addToDeadNodes(currentNode);
    DatanodeInfo oldNode = currentNode;
    DatanodeInfo newNode = blockSeekTo(targetPos, throwWhenNotFound);
    if (!markedDead) {
      /* remove it from deadNodes. blockSeekTo could have cleared
       * deadNodes and added currentNode again. That's OK. */
      deadNodes.remove(oldNode);
    }
    // Report success only if the seek actually moved to a different
    // datanode; nodes are compared by storage ID, not by reference.
    if (!oldNode.getStorageID().equals(newNode.getStorageID())) {
      currentNode = newNode;
      return true;
    } else {
      return false;
    }
View Full Code Here

    // Check if NN recorded length matches on-disk length
    long onDiskLength = data.getFinalizedBlockLength(namespaceId, block);
    if (block.getNumBytes() > onDiskLength) {
      // Shorter on-disk len indicates corruption so report NN the corrupt block
      nn.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(block,
          new DatanodeInfo[] { new DatanodeInfo(nsReg) }) });
      LOG.info("Can't replicate block " + block + " because on-disk length "
          + onDiskLength + " is shorter than NameNode recorded length "
          + block.getNumBytes());
      return;
    }
View Full Code Here

        nsNamenode.commitBlockSynchronization(block,
            newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false,
            nlist);
        DatanodeInfo[] info = new DatanodeInfo[nlist.length];
        for (int i = 0; i < nlist.length; i++) {
          info[i] = new DatanodeInfo(nlist[i]);
        }
        return new LocatedBlock(newblock, info); // success
      }

      //failed
View Full Code Here

        out = new DataOutputStream(new BufferedOutputStream(baseStream,
                                                            SMALL_BUFFER_SIZE));

        blockSender = new BlockSender(srcNamespaceId, b, 0, b.getNumBytes(),
            false, false, false, datanode);
        DatanodeInfo srcNode = new DatanodeInfo(getDNRegistrationForNS(srcNamespaceId));

        //
        // Header info
        //
        WriteBlockHeader header = new WriteBlockHeader(
View Full Code Here

   * @param in The stream to read from
   * @throws IOException
   */
  private void writeBlock(DataInputStream in,
      VersionAndOpcode versionAndOpcode) throws IOException {
    DatanodeInfo srcDataNode = null;
    LOG.debug("writeBlock receive buf size " + s.getReceiveBufferSize() +
              " tcp no delay " + s.getTcpNoDelay());
    //
    // Read in the header
    //
    long startTime = System.currentTimeMillis();
   
    WriteBlockHeader headerToReceive = new WriteBlockHeader(
        versionAndOpcode);
    headerToReceive.readFields(in);
    int namespaceid = headerToReceive.getNamespaceId();
    Block block = new Block(headerToReceive.getBlockId(),
        dataXceiverServer.estimateBlockSize, headerToReceive.getGenStamp());
    LOG.info("Receiving block " + block +
             " src: " + remoteAddress +
             " dest: " + localAddress);
    int pipelineSize = headerToReceive.getPipelineDepth(); // num of datanodes in entire pipeline
    boolean isRecovery = headerToReceive.isRecoveryFlag(); // is this part of recovery?
    String client = headerToReceive.getClientName(); // working on behalf of this client
    boolean hasSrcDataNode = headerToReceive.isHasSrcDataNode(); // is src node info present
    if (hasSrcDataNode) {
      srcDataNode = headerToReceive.getSrcDataNode();
    }
    int numTargets = headerToReceive.getNumTargets();
    DatanodeInfo targets[] = headerToReceive.getNodes();

    DataOutputStream mirrorOut = null;  // stream to next target
    DataInputStream mirrorIn = null;    // reply from next target
    DataOutputStream replyOut = null;   // stream to prev target
    Socket mirrorSock = null;           // socket to next target
View Full Code Here

    long blockId = replaceBlockHeader.getBlockId();
    long genStamp = replaceBlockHeader.getGenStamp();
    Block block = new Block(blockId, dataXceiverServer.estimateBlockSize,
        genStamp);
    String sourceID = replaceBlockHeader.getSourceID();
    DatanodeInfo proxySource = replaceBlockHeader.getProxySource();

    if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
      LOG.warn("Not able to receive block " + blockId + " from "
          + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
      sendResponse(s, (short)DataTransferProtocol.OP_STATUS_ERROR,
          datanode.socketWriteTimeout);
      return;
    }

    Socket proxySock = null;
    DataOutputStream proxyOut = null;
    short opStatus = DataTransferProtocol.OP_STATUS_SUCCESS;
    BlockReceiver blockReceiver = null;
    DataInputStream proxyReply = null;
    long totalReceiveSize = 0;
    long writeDuration;
   
    updateCurrentThreadName("replacing block " + block + " from " + sourceID);
    try {
      // get the output stream to the proxy
      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(
          proxySource.getName());
      proxySock = datanode.newSocket();
      NetUtils.connect(proxySock, proxyAddr, datanode.socketTimeout);
      proxySock.setSoTimeout(datanode.socketTimeout);

      OutputStream baseStream = NetUtils.getOutputStream(proxySock,
View Full Code Here

  private void handleScanFailure(Block block) {
   
    LOG.info("Reporting bad block " + block + " to namenode.");
   
    try {
      DatanodeInfo[] dnArr = { new DatanodeInfo(datanode.getDNRegistrationForNS(namespaceId)) };
      LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };
      datanode.reportBadBlocks(namespaceId,blocks);
    } catch (IOException e){
      /* One common reason is that NameNode could be in safe mode.
       * Should we keep on retrying in that case?
View Full Code Here

      final DatanodeManager dm = getBlockManager().getDatanodeManager();     
      final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);

      DatanodeInfo[] arr = new DatanodeInfo[results.size()];
      for (int i=0; i<arr.length; i++) {
        arr[i] = new DatanodeInfo(results.get(i));
      }
      return arr;
    } finally {
      readUnlock();
    }
View Full Code Here

      checkOperation(OperationCategory.WRITE);
      for (int i = 0; i < blocks.length; i++) {
        ExtendedBlock blk = blocks[i].getBlock();
        DatanodeInfo[] nodes = blocks[i].getLocations();
        for (int j = 0; j < nodes.length; j++) {
          DatanodeInfo dn = nodes[j];
          blockManager.findAndMarkBlockAsCorrupt(blk, dn,
              "client machine reported it");
        }
      }
    } finally {
View Full Code Here

TOP

Related Classes of org.apache.hadoop.hdfs.protocol.DatanodeInfo

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.