Examples of HdfsDataInputStream


Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

  /** Get the block list of an already-open stream by casting to the HDFS type. */
  public static List<LocatedBlock> getAllBlocks(FSDataInputStream in)
      throws IOException {
    return ((HdfsDataInputStream) in).getAllBlocks();
  }

  public static List<LocatedBlock> getAllBlocks(FileSystem fs, Path path)
      throws IOException {
    // Open, fetch the block list, and close the stream rather than leaking it.
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    try {
      return in.getAllBlocks();
    } finally {
      in.close();
    }
  }
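A minimal sketch of how such a helper might be used, assuming a FileSystem that is already backed by HDFS; the class and method names here are hypothetical:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Hypothetical caller: list every block of a file and where its replicas live.
public class BlockLister {
  public static void printBlocks(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    try {
      List<LocatedBlock> blocks = in.getAllBlocks();
      for (LocatedBlock blk : blocks) {
        System.out.println("block " + blk.getBlock());
        for (DatanodeInfo node : blk.getLocations()) {
          System.out.println("  replica on " + node.getXferAddr());
        }
      }
    } finally {
      in.close();
    }
  }
}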

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    String downnode, int numDatanodes) throws IOException {
    boolean isNodeDown = (downnode != null);
    // need a raw stream
    assertTrue("Not HDFS:"+fileSys.getUri(),
        fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream)
        ((DistributedFileSystem)fileSys).open(name);
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    for (LocatedBlock blk : dinfo) { // for each block
      int hasdown = 0;
      DatanodeInfo[] nodes = blk.getLocations();
      for (int j = 0; j < nodes.length; j++) { // for each replica
        if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
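The pattern above, getAllBlocks() combined with DatanodeInfo.getXferAddr(), is what lets this decommissioning test walk every replica of every block and check whether any of them still lives on the stopped datanode.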

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

            "waitReplication: " + e);
      }
      fsIn = fs.open(TEST_PATH);
      IOUtils.readFully(fsIn, original, 0,
          BlockReaderLocalTest.TEST_LENGTH);
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
          dfsIn.getReadStatistics().getTotalLocalBytesRead());
      if (isShortCircuit) {
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
            dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
      } else {
        Assert.assertEquals(0,
            dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
      }
      fsIn.close();
      fsIn = null;
    } finally {
      DFSInputStream.tcpReadsDisabledForTesting = false;
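The same statistics are available outside a test. A minimal sketch, assuming fs is an HDFS-backed FileSystem and the file holds at least length bytes; class and method names are hypothetical:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.io.IOUtils;

// Hypothetical sketch: read a file fully, then report where the bytes came from.
public class ReadStatsDemo {
  public static void printReadStats(FileSystem fs, Path path, int length)
      throws IOException {
    byte[] buf = new byte[length];
    FSDataInputStream in = fs.open(path);
    try {
      IOUtils.readFully(in, buf, 0, length);
      HdfsDataInputStream hin = (HdfsDataInputStream) in;
      System.out.println("total bytes read:    "
          + hin.getReadStatistics().getTotalBytesRead());
      System.out.println("local bytes read:    "
          + hin.getReadStatistics().getTotalLocalBytesRead());
      System.out.println("short-circuit bytes: "
          + hin.getReadStatistics().getTotalShortCircuitBytesRead());
    } finally {
      in.close();
    }
  }
}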

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      ByteBuffer result = fsIn.read(null, 4096,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
      fsIn.releaseBuffer(result);
    } finally {
      if (fsIn != null) fsIn.close();
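A hedged sketch of the zero-copy read pattern shown above: pass a null buffer pool, skip checksums, and always hand the buffer back with releaseBuffer(). Names are hypothetical; with a null pool the call throws UnsupportedOperationException when the zero-copy path is unavailable:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;

// Hypothetical sketch: zero-copy read of up to maxLength bytes from the start
// of a file. The returned buffer is owned by the stream and must be released.
public class ZeroCopyDemo {
  public static int readZeroCopy(FileSystem fs, Path path, int maxLength)
      throws IOException {
    FSDataInputStream in = fs.open(path);
    try {
      ByteBuffer buf = in.read(null, maxLength,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      if (buf == null) {
        return 0; // end of file
      }
      int n = buf.remaining(); // may be less than maxLength at a block boundary
      in.releaseBuffer(buf);
      return n;
    } finally {
      in.close();
    }
  }
}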

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

      IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
      fsIn.close();
      fsIn = fs.open(TEST_PATH);

      // Try to read 8192, but only get 4096 because of the block size.
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      ByteBuffer result =
        dfsIn.read(null, 8192, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
      dfsIn.releaseBuffer(result);
     
      // Try to read 4097, but only get 4096 because of the block size.
      result =
          dfsIn.read(null, 4097, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 4096, 8192),
          byteBufferToArray(result));
      dfsIn.releaseBuffer(result);
    } finally {
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();
    }
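Both reads above stop at the 4096-byte block boundary even though more bytes were requested: a single zero-copy read never spans blocks, so a caller that wants the whole file has to loop until the stream is exhausted.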

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

      fsIn = fs.open(TEST_PATH);
      byte original[] = new byte[TEST_FILE_LENGTH];
      IOUtils.readFully(fsIn, original, 0, TEST_FILE_LENGTH);
      fsIn.close();
      fsIn = fs.open(TEST_PATH);
      HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
      ByteBuffer result;
      try {
        result = dfsIn.read(null, 4097, EnumSet.noneOf(ReadOption.class));
        Assert.fail("expected UnsupportedOperationException");
      } catch (UnsupportedOperationException e) {
        // expected
      }
      result = dfsIn.read(null, 4096, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.assertEquals(4096, result.remaining());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalBytesRead());
      Assert.assertEquals(4096,
          dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 4096),
          byteBufferToArray(result));
    } finally {
      if (fsIn != null) fsIn.close();
      if (fs != null) fs.close();
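Note the asymmetry in this test: without SKIP_CHECKSUMS the zero-copy path is unavailable here, and because no ByteBufferPool was supplied there is no fallback buffer to copy into, so the read fails with UnsupportedOperationException instead of degrading to an ordinary copying read.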

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FSDataInputStream>() {
      @Override
      public FSDataInputStream doCall(final Path p)
          throws IOException, UnresolvedLinkException {
        return new HdfsDataInputStream(
            dfs.open(getPathName(p), bufferSize, verifyChecksum));
      }
      @Override
      public FSDataInputStream next(final FileSystem fs, final Path p)
          throws IOException {
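This is the code that makes the casts in the other snippets safe: on HDFS, open() wraps the raw DFSInputStream in an HdfsDataInputStream. A minimal, hypothetical sketch of a caller relying on that, guarded with instanceof in case the path resolves to a different filesystem:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

// Hypothetical sketch: downcast the generic stream to reach HDFS-specific APIs.
public class OpenDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at HDFS
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream in = fs.open(new Path(args[0]));
    try {
      if (in instanceof HdfsDataInputStream) {
        HdfsDataInputStream hin = (HdfsDataInputStream) in;
        System.out.println("visible length: " + hin.getVisibleLength());
      }
    } finally {
      in.close();
    }
  }
}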

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

          "Input streams must be types of HdfsDataInputStream");
   
    LocatedBlock lblocks[] = new LocatedBlock[2];

    // Find block in data stream.
    HdfsDataInputStream dfsIn = (HdfsDataInputStream) in;
    ExtendedBlock dataBlock = dfsIn.getCurrentBlock();
    if (dataBlock == null) {
      LOG.error("Error: Current block in data stream is null! ");
      return false;
    }
    DatanodeInfo[] dataNode = {dfsIn.getCurrentDatanode()};
    lblocks[0] = new LocatedBlock(dataBlock, dataNode);
    LOG.info("Found checksum error in data stream at "
        + dataBlock + " on datanode="
        + dataNode[0]);

    // Find block in checksum stream
    HdfsDataInputStream dfsSums = (HdfsDataInputStream) sums;
    ExtendedBlock sumsBlock = dfsSums.getCurrentBlock();
    if (sumsBlock == null) {
      LOG.error("Error: Current block in checksum stream is null! ");
      return false;
    }
    DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()};
    lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
    LOG.info("Found checksum error in checksum stream at "
        + sumsBlock + " on datanode=" + sumsNode[0]);

    // Ask client to delete blocks.
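Here getCurrentBlock() and getCurrentDatanode() pinpoint exactly which replica served the corrupt bytes, so both the data block and the matching checksum block can be handed to the client for deletion and re-replication.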

Examples of org.apache.hadoop.hdfs.client.HdfsDataInputStream

    fs.delete(root, true);
    files = null;
  }
 
  public static ExtendedBlock getFirstBlock(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    try {
      in.readByte();
      return in.getCurrentBlock();
    } finally {
      in.close();
    }
  } 
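A sketch of the same trick used directly, with hypothetical names: reading a single byte forces the client onto the first block, after which getCurrentBlock() identifies it:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

// Hypothetical sketch mirroring the helper above: read one byte so the stream
// positions itself on the first block, then report that block's identity.
public class FirstBlockDemo {
  public static void printFirstBlock(FileSystem fs, Path path) throws IOException {
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    try {
      in.readByte(); // forces the client to open the first block
      ExtendedBlock blk = in.getCurrentBlock();
      System.out.println("first block " + blk.getBlockName()
          + " in pool " + blk.getBlockPoolId());
    } finally {
      in.close();
    }
  }
}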
