Class org.apache.hadoop.hdfs.DFSClient (package org.apache.hadoop.hdfs)

Examples of org.apache.hadoop.hdfs.DFSClient.BlockReader


  private BlockReader getBlockReader(
      int offset, int lenToRead) throws IOException {
    Block block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    InetSocketAddress targetAddr =
        NetUtils.createSocketAddr(nodes[0].getName());
    Socket s = new Socket();
    s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
    // The snippet was cut off here; in the pre-0.23 DFSClient API it
    // finishes by constructing the reader. The exact newBlockReader
    // signature varies across versions and conf is the test's
    // Configuration object, so treat this ending as a sketch:
    return BlockReader.newBlockReader(
        s, targetAddr.toString() + ":" + block.getBlockId(),
        block.getBlockId(), block.getGenerationStamp(),
        offset, lenToRead, conf.getInt("io.file.buffer.size", 4096));
  }
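The tests below all drain the reader through a slurpReader helper that this page never shows. A minimal sketch of what such a helper looks like, assuming JUnit assertions and the standard read(byte[], int, int) contract of BlockReader (a reconstruction, not the exact source):

  private void slurpReader(BlockReader reader, int length, boolean expectEOF)
      throws IOException {
    byte buf[] = new byte[1024];
    int nRead = 0;
    while (nRead < length) {
      // read() may return fewer bytes than requested; keep looping
      int n = reader.read(buf, 0, Math.min(buf.length, length - nRead));
      assertTrue("unexpected EOF mid-read", n > 0);
      nRead += n;
    }
    if (expectEOF) {
      // a complete read of the requested range should leave the reader at EOF
      assertEquals(-1, reader.read(buf, 0, buf.length));
    }
  }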


  /**
   * Verify that if we read an entire block, we send checksumOk
   */
  @Test
  public void testBlockVerification() throws Exception {
    BlockReader reader = spy(getBlockReader(0, FILE_SIZE_K * 1024));
    slurpReader(reader, FILE_SIZE_K * 1024, true);
    verify(reader).checksumOk(reader.dnSock);
    reader.close();
  }
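These tests lean on Mockito: spy() wraps the real BlockReader so its reads still go to the datanode, while verify() checks whether checksumOk was invoked on the spy. The static imports assumed throughout are:

  import static org.mockito.Mockito.spy;
  import static org.mockito.Mockito.verify;
  import static org.mockito.Mockito.never;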

  /**
   * Test that if we do an incomplete read, we don't call checksumOk
   */
  @Test
  public void testIncompleteRead() throws Exception {
    BlockReader reader = spy(getBlockReader(0, FILE_SIZE_K * 1024));
    slurpReader(reader, FILE_SIZE_K / 2 * 1024, false);

    // We asked the BlockReader for the whole file but only read half,
    // so checksumOk must never be sent
    verify(reader, never()).checksumOk(reader.dnSock);
    reader.close();
  }

  /**
   * Test that if we ask for only part of the block and read everything
   * we asked for, we still send checksumOk; the DataNode keeps track of
   * whether the request covered the whole block or not.
   */
  @Test
  public void testCompletePartialRead() throws Exception {
    // Ask for half the file
    BlockReader reader = spy(getBlockReader(0, FILE_SIZE_K * 1024 / 2));
    // And read half the file
    slurpReader(reader, FILE_SIZE_K * 1024 / 2, true);
    verify(reader).checksumOk(reader.dnSock);
    reader.close();
  }

  /**
   * Read at a variety of unaligned offsets and lengths, verifying that
   * every complete read still sends checksumOk. (The start of this
   * snippet was truncated; the signature and startOffsets values are
   * reconstructed, not taken verbatim from this page.)
   */
  @Test
  public void testUnalignedReads() throws Exception {
    int startOffsets[] = new int[] { 0, 3, 129 };
    int lengths[] = new int[] { 30, 300, 512, 513, 1025 };
    for (int startOffset : startOffsets) {
      for (int length : lengths) {
        DFSClient.LOG.info("Testing startOffset = " + startOffset +
                           " and len=" + length);
        BlockReader reader = spy(getBlockReader(startOffset, length));
        slurpReader(reader, length, true);
        verify(reader).checksumOk(reader.dnSock);
        reader.close();
      }
    }
  }
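The offsets and lengths above look arbitrary but straddle HDFS's default 512-byte checksum chunk (io.bytes.per.checksum). A quick worked example of why, say, offset 3 with length 513 is an interesting case (the chunk-boundary intent is an assumption about the test's design, not stated in the source):

  int bytesPerChecksum = 512;                                     // HDFS default
  int startOffset = 3, length = 513;
  int firstChunk = startOffset / bytesPerChecksum;                // 0
  int lastChunk = (startOffset + length - 1) / bytesPerChecksum;  // 515 / 512 = 1
  // The read starts mid-chunk and ends mid-chunk, so the reader must
  // fetch and verify two full checksum chunks to return 513 user bytes.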

      // The first read forces the DFSInputStream to open a BlockReader.
      in.readByte();

      // Dig the private blockReader field out of the stream via reflection.
      DFSInputStream dfsClientIn = findDFSClientInputStream(in);
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);

      // Simulate a slow, non-local datanode so the slow-read handling kicks in.
      blockReader.setArtificialSlowdown(1000);
      blockReader.isReadLocal = false;
      blockReader.isReadRackLocal = false;
      blockReader.ENABLE_THROW_FOR_SLOW = true;
      for (int i = 0; i < 1024; i++) {
        in.readByte();
      }

      // Remove the slowdown and read the rest of the file normally.
      blockReader.setArtificialSlowdown(0);
      for (int i = 1024; i < fileSize - 1; i++) {
        in.readByte();
      }

      // Fetch the client's dead-node map, presumably to assert that the
      // slow datanode was marked dead.
      ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
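The findDFSClientInputStream and getDeadNodes helpers are defined elsewhere in the test class. A plausible sketch of both, assuming FSDataInputStream keeps its wrapped stream in java.io.FilterInputStream's protected "in" field and DFSInputStream keeps its dead-node map in a private "deadNodes" field (both field names are assumptions):

  private static DFSInputStream findDFSClientInputStream(FSDataInputStream in)
      throws NoSuchFieldException, IllegalAccessException {
    // FSDataInputStream extends DataInputStream, so the wrapped stream
    // lives in FilterInputStream's protected "in" field.
    Field inField = FilterInputStream.class.getDeclaredField("in");
    inField.setAccessible(true);
    return (DFSInputStream) inField.get(in);
  }

  @SuppressWarnings("unchecked")
  private static ConcurrentHashMap<DatanodeInfo, DatanodeInfo> getDeadNodes(
      DFSInputStream in) throws NoSuchFieldException, IllegalAccessException {
    Field deadField = DFSInputStream.class.getDeclaredField("deadNodes");
    deadField.setAccessible(true);
    return (ConcurrentHashMap<DatanodeInfo, DatanodeInfo>) deadField.get(in);
  }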


