Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.BlockReader
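BlockReader is the client-side interface HDFS uses to stream a single block's bytes from a DataNode (or, with short-circuit reads, straight from local disk). The examples below, drawn from fsck, JspHelper, DFSInputStream, and test code across several Hadoop versions, all follow the same shape: pick a datanode holding the block, connect, construct a reader, drain it, and close it. As a point of reference, here is that recurring drain step pulled out into a standalone helper (a sketch distilled from the examples below; the helper name and buffer size are illustrative, not part of the HDFS API):

  static long drainBlock(BlockReader reader, OutputStream out)
      throws IOException {
    byte[] buf = new byte[4096];
    long total = 0;
    int cnt;
    try {
      // read() returns the number of bytes copied, or <= 0 at end of block.
      while ((cnt = reader.read(buf, 0, buf.length)) > 0) {
        out.write(buf, 0, cnt);
        total += cnt;
      }
    } finally {
      reader.close();
    }
    return total;  // callers typically compare this against block.getNumBytes()
  }
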


  private void copyBlock(DFSClient dfs, LocatedBlock lblock,
                         OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();

    while (blockReader == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
      } catch (IOException ie) {
        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
          throw new IOException("Could not obtain block " + lblock, ie);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
          // ignore and keep retrying
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        String file = BlockReaderFactory.getFileName(targetAddr,
            block.getBlockPoolId(), block.getBlockId());
        blockReader = new BlockReaderFactory(dfs.getConf()).
            setFileName(file).
            setBlock(block).
            setBlockToken(lblock.getBlockToken()).
            setStartOffset(0).
            setLength(-1).
            setVerifyChecksum(true).
            setClientName("fsck").
            setDatanodeInfo(chosenNode).
            setInetSocketAddress(targetAddr).
            setCachingStrategy(CachingStrategy.newDropBehind()).
            setClientCacheContext(dfs.getClientContext()).
            setConfiguration(namenode.conf).
            setRemotePeerFactory(new RemotePeerFactory() {
              @Override
              public Peer newConnectedPeer(InetSocketAddress addr)
                  throws IOException {
                Peer peer = null;
                Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
                try {
                  s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                  s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                  peer = TcpPeerServer.peerFromSocketAndKey(s, namenode.getRpcServer().
                        getDataEncryptionKey());
                } finally {
                  if (peer == null) {
                    IOUtils.closeQuietly(s);
                  }
                }
                return peer;
              }
            }).
            build();
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
      }
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if (bytesRead != block.getNumBytes()) {
        throw new IOException("Recorded block size is " + block.getNumBytes() +
                              ", but datanode returned " + bytesRead + " bytes");
      }
    } catch (Exception e) {
      LOG.error("Error reading block", e);
      success = false;
    } finally {
      blockReader.close();
    }
    if (!success) {
      throw new Exception("Could not copy block data for " + lblock.getBlock());
    }
  }


  // try reading a block using a BlockReader directly
  private static void tryRead(final Configuration conf, LocatedBlock lblock,
      boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
      DatanodeInfo[] nodes = lblock.getLocations();
      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());

      blockReader = new BlockReaderFactory(new DFSClient.Conf(conf)).
          setFileName(BlockReaderFactory.getFileName(targetAddr,
                        "test-blockpoolid", block.getBlockId())).
          setBlock(block).
          setBlockToken(lblock.getBlockToken()).
          setInetSocketAddress(targetAddr).
          setStartOffset(0).
          setLength(-1).
          setVerifyChecksum(true).
          setClientName("TestBlockTokenWithDFS").
          setDatanodeInfo(nodes[0]).
          setCachingStrategy(CachingStrategy.newDefaultStrategy()).
          setClientCacheContext(ClientContext.getFromConf(conf)).
          setConfiguration(conf).
          setRemotePeerFactory(new RemotePeerFactory() {
            @Override
            public Peer newConnectedPeer(InetSocketAddress addr)
                throws IOException {
              Peer peer = null;
              Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
              try {
                sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
                peer = TcpPeerServer.peerFromSocket(sock);
              } finally {
                if (peer == null) {
                  IOUtils.closeSocket(sock);
                }
              }
              return peer;
            }
          }).
          build();
    } catch (IOException ex) {
      ioe = ex;
    } finally {
      if (blockReader != null) {
        try {
          blockReader.close();
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }
    }
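The snippet cuts off before tryRead's final assertion. In a block-token test of this shape, the caught IOException would typically be checked against shouldSucceed, along these lines (a hypothetical continuation; the exact assertion messages and exception type are assumptions, not taken from the snippet):

    if (shouldSucceed) {
      Assert.assertNull("read with a valid block token should have succeeded", ioe);
    } else {
      Assert.assertTrue("read with an invalid block token should have failed",
          ioe instanceof InvalidBlockTokenException);
    }
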

      // start of the loop.
      block = getBlockAt(block.getStartOffset(), false, true);
      DNAddrPair retval = chooseDataNode(block);
      DatanodeInfo chosenNode = retval.info;
      InetSocketAddress targetAddr = retval.addr;
      BlockReader reader = null;
      int len = (int) (end - start + 1);

      try {
        if (DFSClient.LOG.isDebugEnabled()) {
          DFSClient.LOG.debug("fetchBlockByteRange shortCircuitLocalReads " +
              dfsClient.shortCircuitLocalReads +
              " localhost " + dfsClient.localHost +
              " targetAddr " + targetAddr);
        }
        // First try reading the block locally (short-circuit read).
        if (dfsClient.shortCircuitLocalReads &&
            NetUtils.isLocalAddressWithCaching(targetAddr.getAddress())) {
          reader = BlockReaderLocal.newBlockReader(dfsClient.conf, src,
              namespaceId, block.getBlock(), chosenNode, start, len,
              dfsClient.metrics, verifyChecksum, this.clearOsBuffer);
          reader.setReadLocal(true);
          reader.setFsStats(dfsClient.stats);
        } else {
          // Otherwise go to the datanode over TCP.
          dn = dfsClient.socketFactory.createSocket();
          NetUtils.connect(dn, targetAddr, dfsClient.socketTimeout,
              dfsClient.ipTosValue);
          dn.setSoTimeout(dfsClient.socketTimeout);
          reader = BlockReader.newBlockReader(
              dfsClient.getDataTransferProtocolVersion(), namespaceId,
              dn, src, block.getBlock().getBlockId(),
              block.getBlock().getGenerationStamp(),
              start, len, buffersize, verifyChecksum, dfsClient.clientName,
              dfsClient.minReadSpeedBps);
          boolean isLocalHost = NetUtils.isLocalAddress(targetAddr.getAddress());
          reader.setReadLocal(isLocalHost);
          if (!isLocalHost) {
            reader.setReadRackLocal(
                dfsClient.isInLocalRack(targetAddr.getAddress()));
          }
          reader.setFsStats(dfsClient.stats);
        }
        int nread = reader.readAll(buf, offset, len);
        if (nread != len) {
          throw new IOException("truncated return from reader.read(): " +
              "expected " + len + ", got " + nread);
        }
        return;
        // ... (catch/finally and retry handling follow in the full source)
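The first branch above is taken only when dfsClient.shortCircuitLocalReads is set and the target address is local. Enabling that mode is a client-side configuration switch; a minimal sketch (the key name matches stock HDFS, though this older branch may spell it differently):

    Configuration conf = new Configuration();
    // Let the client bypass the DataNode and read replica files directly
    // when the block lives on the local machine.
    conf.setBoolean("dfs.client.read.shortcircuit", true);
    FileSystem fs = FileSystem.get(conf);
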

  private void copyBlock(DFSClient dfs, LocatedBlock lblock,
                         OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
      } catch (IOException ie) {
        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
          throw new IOException("Could not obtain block " + lblock, ie);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
          // ignore and keep retrying
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        s = NetUtils.getDefaultSocketFactory(conf).createSocket();
        s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
       
        String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(),
            block.getBlockId());
        blockReader = BlockReaderFactory.newBlockReader(dfs.getConf(),
            file, block, lblock.getBlockToken(), 0, -1, true, "fsck",
            TcpPeerServer.peerFromSocketAndKey(s, namenode.getRpcServer().
                getDataEncryptionKey()),
            chosenNode, null, null, null, false);
       
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
        if (s != null) {
          try {
            s.close();
          } catch (IOException iex) {
          }
        }
        s = null;
      }
    }
    if (blockReader == null) {
      throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if (bytesRead != block.getNumBytes()) {
        throw new IOException("Recorded block size is " + block.getNumBytes() +
                              ", but datanode returned " + bytesRead + " bytes");
      }
      // ... (error handling and cleanup continue as in the first copyBlock
      // example above)

     
    int amtToRead = (int) Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for the file name.
    String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
    BlockReader blockReader = BlockReaderFactory.newBlockReader(
        conf, s, file,
        new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
        offsetIntoBlock, amtToRead, encryptionKey);
       
    byte[] buf = new byte[(int)amtToRead];
    int readOffset = 0;
    int retries = 2;
    while (amtToRead > 0) {
      // readFully either fills the requested range or throws, so on success
      // numRead always equals amtToRead.
      int numRead = amtToRead;
      try {
        blockReader.readFully(buf, readOffset, amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0)
          throw new IOException("Could not read data from datanode");
        continue;
      }
      amtToRead -= numRead;
      readOffset += numRead;
    }
    blockReader.close();

     
    int amtToRead = (int) Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for the file name.
    String file = BlockReaderFactory.getFileName(addr, poolId, blockId);
    BlockReader blockReader = BlockReaderFactory.newBlockReader(dfsConf, file,
        new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
        offsetIntoBlock, amtToRead,  true,
        "JspHelper", TcpPeerServer.peerFromSocketAndKey(s, encryptionKey),
        new DatanodeID(addr.getAddress().getHostAddress(),
            addr.getHostName(), poolId, addr.getPort(), 0, 0, 0), null,
            null, null, false, CachingStrategy.newDefaultStrategy());
       
    final byte[] buf = new byte[amtToRead];
    int readOffset = 0;
    int retries = 2;
    while (amtToRead > 0) {
      // readFully either fills the requested range or throws.
      int numRead = amtToRead;
      try {
        blockReader.readFully(buf, readOffset, amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0)
          throw new IOException("Could not read data from datanode");
        continue;
      }
      amtToRead -= numRead;
      readOffset += numRead;
    }
    blockReader.close();
    out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
  }
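Note that the retry bookkeeping in this loop is largely vestigial: readFully either fills the requested range or throws, so on the success path numRead equals amtToRead and the loop runs exactly once. Retrying the same reader after an IOException is also questionable, since the stream position may already have advanced. A fail-fast sketch of the same read, reusing the variables from the example above:

    try {
      // readFully throws if it cannot supply exactly amtToRead bytes.
      blockReader.readFully(buf, 0, amtToRead);
    } finally {
      blockReader.close();
    }
    out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
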

      in.readByte();

      // Dig the private blockReader field out of the DFSInputStream so the
      // test can manipulate the reader directly.
      DFSInputStream dfsClientIn = findDFSClientInputStream(in);
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

      // Slow the reader down artificially and mark it as neither node-local
      // nor rack-local, so the slow-read handling path is exercised.
      blockReader.setArtificialSlowdown(1000);
      blockReader.isReadLocal = false;
      blockReader.isReadRackLocal = false;
      blockReader.ENABLE_THROW_FOR_SLOW = true;
      for (int i = 0; i < 1024; i++) {
        in.readByte();
      }

      blockReader.setArtificialSlowdown(0);
      for (int i = 1024; i < fileSize - 1; i++) {
        in.readByte();
      }

      ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);

    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for the file name.
    BlockReader blockReader =
        BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
                                   namespaceId,
                                   s, addr.toString() + ":" + blockId,
                                   blockId, genStamp, offsetIntoBlock,
                                   amtToRead,
                                   conf.getInt("io.file.buffer.size", 4096));
       
    byte[] buf = new byte[(int)amtToRead];
    int readOffset = 0;
    int retries = 2;
    while ( amtToRead > 0 ) {
      int numRead;
      try {
        numRead = blockReader.readAll(buf, readOffset, (int)amtToRead);
      } catch (IOException e) {
        retries--;
        if (retries == 0)
          throw new IOException("Could not read data from datanode");

  private void copyBlock(DFSClient dfs, LocatedBlock lblock,
                         OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
      } catch (IOException ie) {
        if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
          // ignore and keep retrying
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        s = new Socket();
        s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
       
        blockReader =
            BlockReader.newBlockReader(DataTransferProtocol.DATA_TRANSFER_VERSION,
                                               nn.getNamesystem().getFSImage().namespaceID,
                                               s, targetAddr.toString() + ":" +
                                               block.getBlockId(),
                                               block.getBlockId(),
                                               block.getGenerationStamp(),
                                               0, -1,
                                               conf.getInt("io.file.buffer.size", 4096));
       
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
        if (s != null) {
          try {
            s.close();
          } catch (IOException iex) {
          }
        }
        s = null;
      }
    }
    if (blockReader == null) {
      throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if (bytesRead != block.getNumBytes()) {
        throw new IOException("Recorded block size is " + block.getNumBytes() +
                              ", but datanode returned " + bytesRead + " bytes");
      }
      // ... (error handling and cleanup continue as in the first copyBlock
      // example above)

      final DataEncryptionKey encryptionKey)
          throws IOException {
    if (chunkSizeToView == 0) return;
    int amtToRead = (int)Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
     
    BlockReader blockReader = new BlockReaderFactory(dfsConf).
      setInetSocketAddress(addr).
      setBlock(new ExtendedBlock(poolId, blockId, 0, genStamp)).
      setFileName(BlockReaderFactory.getFileName(addr, poolId, blockId)).
      setBlockToken(blockToken).
      setStartOffset(offsetIntoBlock).
      setLength(amtToRead).
      setVerifyChecksum(true).
      setClientName("JspHelper").
      setClientCacheContext(ClientContext.getFromConf(conf)).
      setDatanodeInfo(new DatanodeInfo(
          new DatanodeID(addr.getAddress().getHostAddress(),
              addr.getHostName(), poolId, addr.getPort(), 0, 0, 0))).
      setCachingStrategy(CachingStrategy.newDefaultStrategy()).
      setConfiguration(conf).
      setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
          try {
            sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
            peer = TcpPeerServer.peerFromSocketAndKey(sock, encryptionKey);
          } finally {
            if (peer == null) {
              IOUtils.closeSocket(sock);
            }
          }
          return peer;
        }
      }).
      build();

    final byte[] buf = new byte[amtToRead];
    try {
      int readOffset = 0;
      int retries = 2;
      while (amtToRead > 0) {
        // readFully either fills the requested range or throws.
        int numRead = amtToRead;
        try {
          blockReader.readFully(buf, readOffset, amtToRead);
        } catch (IOException e) {
          retries--;
          if (retries == 0)
            throw new IOException("Could not read data from datanode");
          continue;
        }
        amtToRead -= numRead;
        readOffset += numRead;
      }
    } finally {
      blockReader.close();
    }
    out.print(HtmlQuoting.quoteHtmlChars(new String(buf, Charsets.UTF_8)));
  }
