Class org.apache.hadoop.io.file.tfile.TFile

Examples of org.apache.hadoop.io.file.tfile.TFile.Reader
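
The fragments below are drawn from Hadoop's TFile test suite and mostly exercise error handling around TFile.Reader. For orientation, here is a minimal, self-contained sketch of the write-then-read round trip that those tests build on; the local path, minimum block size, compression name ("gz"), and comparator ("memcmp") are illustrative assumptions rather than values taken from the snippets.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.file.tfile.TFile;

public class TFileReaderRoundTrip {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/roundtrip.tfile"); // illustrative location

    // Write a small sorted TFile: 64 KB minimum block size, gzip compression,
    // byte-wise ("memcmp") key ordering. Keys must be appended in sorted order.
    FSDataOutputStream out = fs.create(path);
    TFile.Writer writer = new TFile.Writer(out, 64 * 1024, "gz", "memcmp", conf);
    writer.append("key0".getBytes(), "value0".getBytes());
    writer.append("key1".getBytes(), "value1".getBytes());
    writer.close();
    out.close(); // the Writer does not close the underlying stream

    // Read it back: the Reader takes the input stream, the file length, and the conf.
    FSDataInputStream in = fs.open(path);
    TFile.Reader reader = new TFile.Reader(in, fs.getFileStatus(path).getLen(), conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    try {
      while (!scanner.atEnd()) {
        TFile.Reader.Scanner.Entry entry = scanner.entry();
        byte[] key = new byte[entry.getKeyLength()];
        byte[] value = new byte[entry.getValueLength()];
        entry.getKey(key);
        entry.getValue(value);
        System.out.println(new String(key) + " -> " + new String(value));
        scanner.advance();
      }
    } finally {
      scanner.close();
      reader.close();
      in.close();
    }
  }
}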


  @Test
  public void testFailureWriterNotClosed() throws IOException {
    if (skip)
      return;
    Reader reader = null;
    try {
      reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
      Assert.fail("Cannot read before closing the writer.");
    } catch (IOException e) {
      // noop, expecting exceptions
    } finally {
      if (reader != null) {
        reader.close();
      }
    }
  }


    // outMeta is a meta-block output stream obtained from the writer earlier in
    // the test (presumably via prepareMetaBlock("testX"), since the reader looks
    // up "testX" below).
    outMeta.write(123);
    outMeta.write("foo".getBytes());
    outMeta.close();
    closeOutput();

    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    DataInputStream mb = reader.getMetaBlock("testX");
    Assert.assertNotNull(mb);
    mb.close();
    try {
      DataInputStream mbBad = reader.getMetaBlock("testY");
      Assert.fail("Error on handling non-existent metablocks.");
    } catch (Exception e) {
      // noop, expecting exceptions
    }
    reader.close();
  }

  public void testFailureReadValueManyTimes() throws IOException {
    if (skip)
      return;
    writeRecords(5);

    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();

    byte[] vbuf = new byte[BUF_SIZE];
    int vlen = scanner.entry().getValueLength();
    scanner.entry().getValue(vbuf);
    Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + 0);
    try {
      scanner.entry().getValue(vbuf);
      Assert.fail("Cannot get the value mlutiple times.");
    } catch (Exception e) {
      // noop, expecting exceptions
    }

    scanner.close();
    reader.close();
  }

    // create an absolutely empty file
    path = new Path(fs.getWorkingDirectory(), outputFile);
    out = fs.create(path);
    out.close();
    try {
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
      Assert.fail("Error on handling empty files.");
    } catch (EOFException e) {
      // noop, expecting exceptions
    }
  }

      // body of a loop (opened earlier in the test) that fills the file with
      // random bytes, producing a file that is not a valid TFile
      rand.nextBytes(buf);
      out.write(buf);
    }
    out.close();
    try {
      new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
      Assert.fail("Error on handling random files.");
    } catch (IOException e) {
      // noop, expecting exceptions
    }
  }

  public void testFailureNegativeOffset_2() throws IOException {
    if (skip)
      return;
    closeOutput();

    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    try {
      scanner.lowerBound("keyX".getBytes(), -1, 4);
      Assert.fail("Error on handling negative offset.");
    } catch (Exception e) {
      // noop, expecting exceptions
    } finally {
      scanner.close();
      reader.close();
    }
    closeOutput();
  }

  public void testFailureNegativeLength_2() throws IOException {
    if (skip)
      return;
    closeOutput();

    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    try {
      scanner.lowerBound("keyX".getBytes(), 0, -1);
      Assert.fail("Error on handling negative length.");
    } catch (Exception e) {
      // noop, expecting exceptions
    } finally {
      scanner.close();
      reader.close();
    }
    closeOutput();
  }

  public void testFailureNegativeLength_3() throws IOException {
    if (skip)
      return;
    writeRecords(3);

    Reader reader =
        new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    try {
      // test negative array offset
      try {
        scanner.seekTo("keyY".getBytes(), -1, 4);
        Assert.fail("Failed to handle negative offset.");
      } catch (Exception e) {
        // noop, expecting exceptions
      }

      // test negative array length
      try {
        scanner.seekTo("keyY".getBytes(), 0, -2);
        Assert.fail("Failed to handle negative key length.");
      } catch (Exception e) {
        // noop, expecting exceptions
      }
    } finally {
      scanner.close();
      reader.close();
    }
  }

    readRecords(fs, path, count, conf);
  }

  static void readRecords(FileSystem fs, Path path, int count,
      Configuration conf) throws IOException {
    Reader reader =
        new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();

    try {
      for (int nx = 0; nx < count; nx++, scanner.advance()) {
        Assert.assertFalse(scanner.atEnd());
        // Assert.assertTrue(scanner.next());

        byte[] kbuf = new byte[BUF_SIZE];
        int klen = scanner.entry().getKeyLength();
        scanner.entry().getKey(kbuf);
        Assert.assertEquals(new String(kbuf, 0, klen), composeSortedKey(KEY,
            nx));

        byte[] vbuf = new byte[BUF_SIZE];
        int vlen = scanner.entry().getValueLength();
        scanner.entry().getValue(vbuf);
        Assert.assertEquals(new String(vbuf, 0, vlen), VALUE + nx);
      }

      Assert.assertTrue(scanner.atEnd());
      Assert.assertFalse(scanner.advance());
    } finally {
      scanner.close();
      reader.close();
    }
  }

      reader.close();
    }
  }

  private void checkBlockIndex(int recordIndex, int blockIndexExpected) throws IOException {
    Reader reader = new Reader(fs.open(path), fs.getFileStatus(path).getLen(), conf);
    Scanner scanner = reader.createScanner();
    scanner.seekTo(composeSortedKey(KEY, recordIndex).getBytes());
    Assert.assertEquals(blockIndexExpected, scanner.currentLocation
        .getBlockIndex());
    scanner.close();
    reader.close();
  }
