Examples of DFSDataInputStream
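
DFSClient.DFSDataInputStream is the HDFS-specific stream type behind the FSDataInputStream that DistributedFileSystem.open() returns; casting to it exposes block-level queries such as getCurrentBlock(), getCurrentDatanode(), getAllBlocks(), and getVisibleLength(). Before the excerpts below, a minimal sketch of that basic pattern (the path /tmp/sample.txt is hypothetical, and fs.defaultFS is assumed to point at a running HDFS cluster):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;

    public class DFSDataInputStreamSketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the configuration points at HDFS;
        // on any other FileSystem the cast below would fail.
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/sample.txt");   // hypothetical existing file

        // open() is declared to return FSDataInputStream; on HDFS the concrete
        // type is DFSClient.DFSDataInputStream with extra block-level queries.
        DFSDataInputStream in = (DFSDataInputStream) fs.open(p);
        try {
          System.out.println("visible length = " + in.getVisibleLength());
          in.readByte();   // move into the first block so getCurrentBlock() has a value
          System.out.println("current block  = " + in.getCurrentBlock());
          System.out.println("datanode       = " + in.getCurrentDatanode());
        } finally {
          in.close();
        }
      }
    }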


Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    switch(op.getValue()) {
    case OPEN:
    {
      final int b = bufferSize.getValue(conf);
      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
      DFSDataInputStream in = null;
      try {
        in = new DFSClient.DFSDataInputStream(
            dfsclient.open(fullpath, b, true));
        in.seek(offset.getValue());
      } catch(IOException ioe) {
        IOUtils.cleanup(LOG, in);
        IOUtils.cleanup(LOG, dfsclient);
        throw ioe;
      }
      final DFSDataInputStream dis = in;
      final StreamingOutput streaming = new StreamingOutput() {
        @Override
        public void write(final OutputStream out) throws IOException {
          final Long n = length.getValue();
          DFSDataInputStream dfsin = dis;
          DFSClient client = dfsclient;
          try {
            if (n == null) {
              IOUtils.copyBytes(dfsin, out, b);
            } else {
              IOUtils.copyBytes(dfsin, out, n, false);
            }
            dfsin.close();
            dfsin = null;
            dfsclient.close();
            client = null;
          } finally {
            IOUtils.cleanup(LOG, dfsin);
            IOUtils.cleanup(LOG, client);
          }
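The OPEN handler above reduces to a simple pattern: open the file through a DFSClient, seek to the requested offset, copy bytes to the response with IOUtils.copyBytes, and clean up whatever was not closed cleanly. A stripped-down, hedged sketch of that pattern outside the servlet context (the class name, the single-argument DFSClient constructor reading the namenode from conf, and the 4096-byte buffer are illustrative assumptions):

    import java.io.IOException;
    import java.io.OutputStream;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
    import org.apache.hadoop.io.IOUtils;

    public class StreamFileSketch {
      private static final Log LOG = LogFactory.getLog(StreamFileSketch.class);

      /** Copies one HDFS file to the given OutputStream, mirroring the OPEN handler above. */
      static void streamFile(String path, long offset, OutputStream out, Configuration conf)
          throws IOException {
        DFSClient dfsclient = new DFSClient(conf);   // namenode taken from conf (assumption)
        DFSDataInputStream in = null;
        try {
          in = new DFSClient.DFSDataInputStream(dfsclient.open(path, 4096, true));
          in.seek(offset);
          IOUtils.copyBytes(in, out, 4096);          // buffer size is arbitrary here
          in.close();
          in = null;
          dfsclient.close();
          dfsclient = null;
        } finally {
          // Both calls are no-ops when close() already succeeded and the reference was nulled.
          IOUtils.cleanup(LOG, in);
          IOUtils.cleanup(LOG, dfsclient);
        }
      }
    }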

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
                                 new String[] {"supergroup"});
   
    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
   
    final DFSDataInputStream in = (DFSDataInputStream)fs.open(p);

    //Check visible length
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);

    //Able to read?
    for(int i = 0; i < expectedsize; i++) {
      Assert.assertEquals((byte)i, (byte)in.read());
    }

    in.close();
  }

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    fs.delete(root, true);
    files = null;
  }
 
  public static Block getFirstBlock(FileSystem fs, Path path) throws IOException {
    DFSDataInputStream in =
      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
    in.readByte();
    return in.getCurrentBlock();
  }

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

  public void testRead() throws Exception{
    for(int i = 0; i < TEST_FILE_NUM; ++i) {
      String file = "/tmp" + i +".txt";
      DFSTestUtil.createFile(fs, new Path(file), FILE_LEN, (short)5, 1L);
     
      DFSDataInputStream in = (DFSDataInputStream)fs.open(new Path(file));
      int numOfRead = 0;
      while (in.read() >= 0) {   // read() returns -1 at end of stream
        numOfRead++;
      }
      assertEquals(FILE_LEN * (i+1),
          metrics.readSize.getCurrentIntervalValue());
      assertEquals(numOfRead * (i+1),


Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

  /**
   * Verifies that reading a file with the direct read(ByteBuffer) api gives the expected set of bytes.
   */
  static void checkFileContentDirect(FileSystem fs, Path name, byte[] expected,
      int readOffset) throws IOException {
    DFSDataInputStream stm = (DFSDataInputStream)fs.open(name);

    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);

    IOUtils.skipFully(stm, readOffset);

    actual.limit(3);

    //Read a small number of bytes first.
    int nread = stm.read(actual);
    actual.limit(nread + 2);
    nread += stm.read(actual);

    // Read across chunk boundary
    actual.limit(Math.min(actual.capacity(), nread + 517));
    nread += stm.read(actual);
    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
        "A few bytes");
    //Now read rest of it
    actual.limit(actual.capacity());
    while (actual.hasRemaining()) {
      int nbytes = stm.read(actual);

      if (nbytes < 0) {
        throw new EOFException("End of file reached before reading fully.");
      }
      nread += nbytes;
    }
    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
    stm.close();
  }
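
The test above exercises the direct read(ByteBuffer) overload against precomputed expected bytes. For reference, a minimal self-contained sketch of the same direct-read pattern (the path /tmp/data.bin, the 4096-byte buffer, and a reachable HDFS cluster are assumptions):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;

    public class DirectReadSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        DFSDataInputStream in = (DFSDataInputStream) fs.open(new Path("/tmp/data.bin"));
        try {
          ByteBuffer buf = ByteBuffer.allocateDirect(4096);
          int total = 0;
          while (buf.hasRemaining()) {
            int n = in.read(buf);   // read(ByteBuffer) fills between position and limit
            if (n < 0) {
              break;                // EOF before the buffer was full
            }
            total += n;
          }
          buf.flip();               // switch the buffer from filling to draining
          System.out.println("read " + total + " bytes");
        } finally {
          in.close();
        }
      }
    }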

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

      FileSystem parityFs, boolean stripeVerified, StripeInfo si)
          throws StripeMismatchException {
    //setting remoteRackFlag for each of the input streams and verify the stripe
    for (int i = 0 ; i < codec.parityLength + codec.stripeLength ; i++) {
      if (parallelReader.streams[i] instanceof DFSDataInputStream) {
        DFSDataInputStream stream =
            (DFSDataInputStream) parallelReader.streams[i];
        if (i < codec.parityLength) {
          //Dealing with parity blocks
          remoteRackFlag[i] =
              !(((DistributedFileSystem)parityFs).getClient().
                  isInLocalRack(NetUtils.createSocketAddr(
                      stream.getCurrentDatanode().getName())));
          if (LOG.isDebugEnabled()) {
            LOG.debug("RemoteRackFlag at index " + i + " is " +
                remoteRackFlag[i]);
          }
          // Verify with parity Blocks
          if (stripeVerified == false) {
            Block currentBlock = stream.getCurrentBlock();
            if (!currentBlock.equals(si.parityBlocks.get(i))) {
              throw new StripeMismatchException("current block " +
                  currentBlock.toString() + " in position " + i + " doesn't "
                  + "match stripe info:" + si);
            }
          }
        } else {
          //Dealing with Source (file) block
          remoteRackFlag[i] =
              !(((DistributedFileSystem)srcFs).getClient().
                  isInLocalRack(NetUtils.createSocketAddr(
                      stream.getCurrentDatanode().getName())));
          if (LOG.isDebugEnabled()) {
            LOG.debug("RemoteRackFlag at index " + i + " is " +
                remoteRackFlag[i]);
          }
          // Verify with source Blocks
          if (stripeVerified == false) {
            Block currentBlock = stream.getCurrentBlock();
            if (!currentBlock.equals(si.srcBlocks.get(
                i - codec.parityLength))) {
              throw new StripeMismatchException("current block " +
                  currentBlock.toString() + " in position " + i + " doesn't "
                  + "match stripe info:" + si);

Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    Path p = new Path(fileName);
    for (int pri = 0; pri < 8; pri++) {
      createFile(p, pri);

      ioprioClass = ioprioData = 0;
      DFSDataInputStream in = (DFSDataInputStream) fs.open(p);

      byte[] buffer = new byte[BLOCK_SIZE * 2];
      ReadOptions options = new ReadOptions();
      options.setIoprio(NativeIO.IOPRIO_CLASS_BE, pri);
      in.read(BLOCK_SIZE / 2, buffer, 0, BLOCK_SIZE / 2, options);

      if (NativeIO.isAvailable()) {
        assertTrue(NativeIO.isIoprioPossible());
        assertEquals(NativeIO.IOPRIO_CLASS_BE, ioprioClass);
        assertEquals(pri, ioprioData);


Examples of org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream

    }
    this.outs = outs;
    for (int i = 0; i < streams.length; i++) {
      this.streams[i] = streams[i];
      if (this.streams[i] instanceof DFSDataInputStream) {
        DFSDataInputStream stream = (DFSDataInputStream)this.streams[i];
        // in directory raiding, the block size for each input stream
        // might be different, so we need to determine the endOffset of
        // each stream by their own block size.
        List<LocatedBlock> blocks = stream.getAllBlocks();
        if (blocks.size() == 0) {
          this.endOffsets[i] = Long.MAX_VALUE;
          if (computeChecksum) {
            this.checksums[i] = null;
          }
        } else {
          long blockSize = blocks.get(0).getBlockSize();
          this.endOffsets[i] = stream.getPos() + blockSize;
          if (computeChecksum) {
            this.checksums[i] = new CRC32();
          }
        }
      } else {
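The constructor fragment above derives each stream's end offset from getPos() plus the block size reported by getAllBlocks(). A small sketch of that calculation in isolation (the class name is hypothetical; like the raid reader, it assumes the stream is positioned at the start of a block):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    public class EndOfBlockSketch {
      /**
       * Offset at which the stream's current block ends, assuming the stream is
       * positioned at the start of a block (as in the raid reader above).
       */
      static long endOfCurrentBlock(DFSDataInputStream in) throws IOException {
        List<LocatedBlock> blocks = in.getAllBlocks();
        if (blocks.isEmpty()) {
          return Long.MAX_VALUE;          // empty file: no block boundary to respect
        }
        long blockSize = blocks.get(0).getBlockSize();
        return in.getPos() + blockSize;   // same arithmetic as the fragment above
      }
    }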