Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.BlockReader
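BlockReader is the client-side interface HDFS uses to stream the bytes of a single block from a single datanode. The examples below span several Hadoop versions, so construction differs (a BlockReaderFactory builder in newer code, static BlockReader.newBlockReader overloads in older code), but the read contract is the same everywhere. A minimal sketch of that contract, assuming a blockReader already built as in any of the snippets:

  // Drain one block through an already-constructed BlockReader, then
  // release it; read() returns the byte count, or <= 0 at end of block.
  byte[] buf = new byte[4096];
  int n;
  while ((n = blockReader.read(buf, 0, buf.length)) > 0) {
    // consume buf[0..n), e.g. write it to an OutputStream
  }
  blockReader.close();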


  // try reading a block using a BlockReader directly
  private static void tryRead(final Configuration conf, LocatedBlock lblock,
      boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
      DatanodeInfo[] nodes = lblock.getLocations();
      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

      blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
          setFileName(BlockReaderFactory.getFileName(targetAddr,
                        "test-blockpoolid", block.getBlockId())).
          setBlock(block).
          setBlockToken(lblock.getBlockToken()).
          setInetSocketAddress(targetAddr).
          setStartOffset(0).
          setLength(-1).
          setVerifyChecksum(true).
          setClientName("TestBlockTokenWithDFS").
          setDatanodeInfo(nodes[0]).
          setCachingStrategy(CachingStrategy.newDefaultStrategy()).
          setClientCacheContext(ClientContext.getFromConf(conf)).
          setConfiguration(conf).
          setRemotePeerFactory(new RemotePeerFactory() {
            @Override
            public Peer newConnectedPeer(InetSocketAddress addr)
                throws IOException {
              Peer peer = null;
              Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
              try {
                sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                peer = TcpPeerServer.peerFromSocket(sock);
              } finally {
                if (peer == null) {
                  IOUtils.closeSocket(sock);
                }
              }
              return peer;
            }
          }).
          build();
    } catch (IOException ex) {
      ioe = ex;
    } finally {
      if (blockReader != null) {
        try {
          blockReader.close();
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
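Note the cleanup idiom inside newConnectedPeer above: peer stays null until TcpPeerServer.peerFromSocket(sock) succeeds, so the finally block closes the raw socket only when wrapping failed. Once a Peer owns the socket, closing the peer is what releases the connection.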


                         OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
      } catch (IOException ie) {
        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        s = new Socket();
        s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
       
        String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(),
            block.getBlockId());
        blockReader = BlockReaderFactory.newBlockReader(
            conf, s, file, block, lblock.getBlockToken(), 0, -1);

      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
        if (s != null) {
          try {
            s.close();
          } catch (IOException iex) {
          }
        }
        s = null;
      }
    }
    if (blockReader == null) {
      throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if ( bytesRead != block.getNumBytes() ) {
        throw new IOException("Recorded block size is " + block.getNumBytes() +
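The bestNode(...) helper is defined outside this snippet. A plausible sketch, simplified to drop the unused dfs argument and assuming it just picks a random replica not yet marked dead (the real helper may also weigh locality):

  // Hypothetical sketch of bestNode: choose a random live replica.
  // Assumes java.util.Random and java.util.TreeSet are imported.
  private static DatanodeInfo bestNode(DatanodeInfo[] nodes,
      TreeSet<DatanodeInfo> deadNodes) throws IOException {
    if (nodes == null || nodes.length - deadNodes.size() < 1) {
      throw new IOException("No live nodes contain current block");
    }
    Random rand = new Random();
    DatanodeInfo chosen;
    do {
      chosen = nodes[rand.nextInt(nodes.length)];
    } while (deadNodes.contains(chosen));
    return chosen;
  }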

  // try reading a block using a BlockReader directly
  private static void tryRead(Configuration conf, LocatedBlock lblock,
      boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    Socket s = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
      DatanodeInfo[] nodes = lblock.getLocations();
      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
      s = NetUtils.getDefaultSocketFactory(conf).createSocket();
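This is an older variant of the same tryRead test: rather than supplying a RemotePeerFactory, it dials the datanode directly with a Socket from the configuration's default socket factory, and the elided remainder presumably hands that socket to the block-reader construction path.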

      in.readByte();

      DFSInputStream dfsClientIn = findDFSClientInputStream(in);     
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

      blockReader.setArtificialSlowdown(1000);
      blockReader.isReadLocal = false;
      blockReader.isReadRackLocal = false;
      for (int i = 0; i < 1024; i++) {
        in.readByte();
      }

      blockReader.setArtificialSlowdown(0);
      for (int i = 1024; i < fileSize - 1; i++) {
        in.readByte();
      }

      ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
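This test reaches through DFSInputStream via reflection to grab its private blockReader field, then injects an artificial per-read slowdown and clears the local-read flags, apparently so the slow-read and dead-node accounting paths can be exercised without a genuinely slow datanode.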

      Socket sock = dfsClient.socketFactory.createSocket();
      sock.setTcpNoDelay(true);
      NetUtils.connect(sock, dnAddr, dfsClient.socketTimeout);
      sock.setSoTimeout(dfsClient.socketTimeout);
     
      BlockReader reader =
          BlockReader.newBlockReader(protocolVersion, namespaceId, sock, src,
              blockId, generationStamp, startOffset, len, buffersize,
              verifyChecksum, clientName, bytesToCheckReadSpeed,
              minReadSpeedBps, false, cliData, options);
      return reader;
    }
   
    // Allow retry since there is no way of knowing whether the cached socket
    // is good until we actually use it
    for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
      Socket sock = socketCache.get(dnAddr);
      if (sock == null) {
        fromCache = false;
       
        sock = dfsClient.socketFactory.createSocket();
        /**
         * TCP_NODELAY is crucial here because of bad interactions between
         * Nagle's algorithm and delayed ACKs. With connection keepalive
         * between the client and DN, the conversation looks like:
         * 1. Client -> DN: Read block X
         * 2. DN -> client: data for block X;
         * 3. Client -> DN: Status OK (successful read)
         * 4. Client -> DN: Read block Y
         *
         * The fact that step #3 and #4 are both in the client -> DN direction
         * triggers Nagling. If the DN is using delayed ACKS, this results in
         * a delay of 40ms or more.
         *
         * TCP_NODELAY disables Nagling and thus avoids this performance
         * disaster.
         */
        sock.setTcpNoDelay(true);
       
        NetUtils.connect(sock, dnAddr, dfsClient.socketTimeout);
        sock.setSoTimeout(dfsClient.socketTimeout);
      }
     
      try {
        // The OP_READ_BLOCK request is sent as we make the BlockReader
        BlockReader reader =
            BlockReader.newBlockReader(protocolVersion, namespaceId, sock, src,
                blockId, generationStamp, startOffset, len, buffersize,
                verifyChecksum, clientName, bytesToCheckReadSpeed,
                minReadSpeedBps, true, cliData, options);
        return reader;
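The TCP_NODELAY comment above describes a hazard for any request/response protocol that pipelines small writes over a kept-alive connection. A self-contained sketch of the same socket setup in plain java.net (NetUtils.connect in the snippet wraps equivalent calls; connectToDatanode is a hypothetical name):

  // Connect to a datanode with Nagle's algorithm disabled, so the
  // status ack for read N and the request for read N+1 are not
  // coalesced behind a delayed ACK (a stall of 40ms or more).
  static Socket connectToDatanode(InetSocketAddress dnAddr, int timeoutMs)
      throws IOException {
    Socket sock = new Socket();
    sock.setTcpNoDelay(true);        // set before any traffic flows
    sock.connect(dnAddr, timeoutMs); // bound connection establishment
    sock.setSoTimeout(timeoutMs);    // bound blocking reads from the DN
    return sock;
  }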

    InjectionHandler.processEvent(InjectionEvent.DFSCLIENT_START_FETCH_FROM_DATANODE);
   
    boolean success = false;
    DatanodeInfo chosenNode = datanode.info;
    InetSocketAddress targetAddr = datanode.addr;
    BlockReader reader = null;
    int len = (int) (end - start + 1);

    try {
      if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("fetchBlockByteRange shortCircuitLocalReads " +
                 dfsClient.shortCircuitLocalReads +
                 " localhst " + dfsClient.localHost +
                 " targetAddr " + targetAddr);
      }
      // first try reading the block locally.
      if (dfsClient.shortCircuitLocalReads && NetUtils.isLocalAddress(targetAddr.getAddress())) {
        reader = BlockReaderLocalBase.newBlockReader(dfsClient.conf, src,
                                             dfsClient.namespaceId, block.getBlock(),
                                             chosenNode,
                                             start,
                                             len,
                                             dfsClient.metrics,
                                             verifyChecksum,
                                             this.clearOsBuffer,
                                             false);
     
        if (reader != null) {
           reader.setReadLocal(true);
           reader.setFsStats(dfsClient.stats);
        } else if (!dfsClient.shortcircuitDisableWhenFail) {
          throw new IOException(
              "Short circuit local read not supported for this scase");
        }
      }
      if (reader == null) {
        // go to the datanode
        reader = getBlockReader(dfsClient.getDataTransferProtocolVersion(),
            dfsClient.namespaceId,
            targetAddr, src,
            block.getBlock().getBlockId(),
            block.getBlock().getGenerationStamp(),
            start, len, buffersize,
            verifyChecksum, dfsClient.clientName,
            dfsClient.bytesToCheckReadSpeed,
            dfsClient.minReadSpeedBps, true,
            cliData, options);
        boolean isLocalHost = NetUtils.isLocalAddressWithCaching(targetAddr
          .getAddress());
        reader.setReadLocal(isLocalHost);
        if (!isLocalHost) {
          reader.setReadRackLocal(
              dfsClient.isInLocalRack(targetAddr));
        }
        reader.setFsStats(dfsClient.stats);
        if (cliData != null) {
          cliData.recordPreadGetBlockReaderTime();
        }
      }
      int nread = reader.readAll(buf, offset, len);
      if (cliData != null) {
        cliData.recordPreadAllTime();
      }
      if (nread != len) {
        throw new IOException("truncated return from reader.read(): " +
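A positional read must return exactly len bytes, so this path uses readAll rather than the plain read loop and treats a short count as an error. A sketch of what readAll amounts to, built only from the read(...) contract shown in the other snippets (readFully is a hypothetical name):

  // Loop read() until len bytes arrive or the stream ends early; a
  // short total is what trips the "truncated return" check above.
  static int readFully(BlockReader reader, byte[] buf, int offset, int len)
      throws IOException {
    int total = 0;
    while (total < len) {
      int n = reader.read(buf, offset + total, len - total);
      if (n <= 0) {
        break; // premature end of block; the caller detects truncation
      }
      total += n;
    }
    return total;
  }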

      s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

      long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

      // Use the block name for file name.
      BlockReader blockReader =
        BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
                                   namespaceId,
                                   s, addr.toString() + ":" + blockId,
                                   blockId, genStamp, offsetIntoBlock,
                                   amtToRead,
                                   conf.getInt("io.file.buffer.size", 4096));

      byte[] buf = new byte[(int) amtToRead];
      int readOffset = 0;
      int retries = 2;
      while (amtToRead > 0) {
        int numRead;
        try {
          numRead = blockReader.readAll(buf, readOffset, (int) amtToRead);
        } catch (IOException e) {
          retries--;
          if (retries == 0)
            throw new IOException("Could not read data from datanode");
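The bound amtToRead = min(chunkSizeToView, blockSize - offsetIntoBlock) clips the request to whatever remains of the block: for example, with a 32 KB chunkSizeToView, a 128 MB block, and an offset 2 KB from the end, amtToRead = min(32768, 2048) = 2048 bytes.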

                         OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
      } catch (IOException ie) {
        if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        s = new Socket();
        s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
       
        blockReader =
            BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
                                               nn.getNamesystem().getFSImage().storage.namespaceID,
                                               s, targetAddr.toString() + ":" +
                                               block.getBlockId(),
                                               block.getBlockId(),
                                               block.getGenerationStamp(),
                                               0, -1,
                                               conf.getInt("io.file.buffer.size", 4096));
       
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
        if (s != null) {
          try {
            s.close();
          } catch (IOException iex) {
          }
        }
        s = null;
      }
    }
    if (blockReader == null) {
      throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if ( bytesRead != block.getNumBytes() ) {
        throw new IOException("Recorded block size is " + block.getNumBytes() +
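This last routine is an older twin of the copy loop above: it works with Block rather than ExtendedBlock, takes its retry cap from DFSClient.MAX_BLOCK_ACQUIRE_FAILURES instead of DFSConfigKeys, and builds the reader through the static BlockReader.newBlockReader overload keyed by DataTransferProtocol.DATA_TRANSFER_VERSION.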
