Class org.apache.hadoop.zebra.tfile.TFile

Examples of org.apache.hadoop.zebra.tfile.TFile.Writer

TFile.Writer appends key-value byte arrays to a block-compressed TFile over an FSDataOutputStream. When a comparator name is supplied, keys must be appended in non-decreasing order; with no comparator the file is unsorted.


  void createFile(int count, String compress) throws IOException {
    conf = new Configuration();
    path = new Path(ROOT, outputFile + "." + compress);
    fs = path.getFileSystem(conf);
    FSDataOutputStream out = fs.create(path);
    // Sorted writer: with a comparator supplied, keys must be appended in
    // non-decreasing order.
    Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

    for (int nx = 0; nx < count; nx++) {
      byte[] key = composeSortedKey(KEY, count, nx).getBytes();
      byte[] value = (VALUE + nx).getBytes();
      writer.append(key, value);
    }
    writer.close();
    out.close();
  }
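For completeness, a minimal read-back sketch follows. It assumes the zebra TFile.Reader mirrors the Hadoop TFile reader API (Reader, Reader.Scanner, and Scanner.Entry); the readFile helper and its locals are hypothetical, not part of the zebra code base.

  void readFile(String compress) throws IOException {
    Path p = new Path(ROOT, outputFile + "." + compress);
    FSDataInputStream in = fs.open(p);
    Reader reader = new Reader(in, fs.getFileStatus(p).getLen(), conf);
    Reader.Scanner scanner = reader.createScanner();
    try {
      // Walk every entry in file order, copying keys and values out.
      while (!scanner.atEnd()) {
        Reader.Scanner.Entry entry = scanner.entry();
        byte[] key = new byte[entry.getKeyLength()];
        byte[] value = new byte[entry.getValueLength()];
        entry.getKey(key);
        entry.getValue(value);
        // ... consume key/value ...
        scanner.advance();
      }
    }
    finally {
      scanner.close();
      reader.close();
      in.close();
    }
  }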


  // bad comparator format
  public void testFailureBadComparatorNames() throws IOException {
    try {
      writer = new Writer(out, BLOCK_SIZE, compression, "badcmp", conf);
      Assert.fail("Failed to catch unsupported comparator names");
    }
    catch (Exception e) {
      // noop, expecting exceptions
      e.printStackTrace();
    }
  }

  // jclass that doesn't exist
  public void testFailureBadJClassNames() throws IOException {
    try {
      writer =
          new Writer(out, BLOCK_SIZE, compression,
              "jclass: some.non.existence.clazz", conf);
      Assert.fail("Failed to catch unsupported comparator names");
    }
    catch (Exception e) {
      // noop, expecting exceptions
    }
  }

  // class exists but is not a RawComparator
  public void testFailureBadJClasses() throws IOException {
    try {
      writer =
          new Writer(out, BLOCK_SIZE, compression,
              "jclass:org.apache.hadoop.zebra.tfile.Chunk", conf);
      Assert.fail("Failed to catch unsupported comparator names");
    }
    catch (Exception e) {
      // noop, expecting exceptions
    }
  }
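The three tests above exercise comparator names the Writer rejects. For contrast, a sketch of the two forms it is expected to accept, assuming the zebra TFile follows the Hadoop TFile convention: the built-in byte-wise "memcmp" ordering, or "jclass:" followed by the fully qualified name of a class implementing RawComparator (org.example.MyRawComparator below is a hypothetical placeholder).

    // Built-in lexicographic byte comparison.
    writer = new Writer(out, BLOCK_SIZE, compression, "memcmp", conf);

    // Or a user-supplied comparator; org.example.MyRawComparator is a
    // hypothetical class implementing RawComparator.
    writer = new Writer(out, BLOCK_SIZE, compression,
        "jclass:org.example.MyRawComparator", conf);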

  public void setUp() throws IOException {
    conf = new Configuration();
    path = new Path(ROOT, outputFile);
    fs = path.getFileSystem(conf);
    out = fs.create(path);
    writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
  }

  public void setUp() throws IOException {
    conf = new Configuration();
    path = new Path(ROOT, outputFile);
    fs = path.getFileSystem(conf);
    out = fs.create(path);
    // No comparator: the TFile is unsorted, so keys may be appended in any order.
    writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
    writer.append("keyZ".getBytes(), "valueZ".getBytes());
    writer.append("keyM".getBytes(), "valueM".getBytes());
    writer.append("keyN".getBytes(), "valueN".getBytes());
    writer.append("keyA".getBytes(), "valueA".getBytes());
    closeOutput();
  }


    if (skip)
      return;
    closeOutput();
    out = fs.create(path);
    try {
      writer = new Writer(out, BLOCK_SIZE, "BAD", comparator, conf);
      Assert.fail("Error on handling invalid compression codecs.");
    }
    catch (Exception e) {
      // noop, expecting exceptions
      // e.printStackTrace();
    }
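The "BAD" name above is rejected when the Writer is constructed. A sketch of the compression names the Writer is expected to accept, assuming the zebra TFile keeps the Hadoop TFile codec set: "none" (no compression), "gz" (gzip), and "lzo" (only when the native LZO library is available).

    // Accepted compression names (assumption: same set as Hadoop TFile).
    writer = new Writer(out, BLOCK_SIZE, "none", comparator, conf);
    // writer = new Writer(out, BLOCK_SIZE, "gz", comparator, conf);
    // writer = new Writer(out, BLOCK_SIZE, "lzo", comparator, conf); // needs native LZO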

    closeOutput();
    out = fs.create(path);
    // Write one byte first so the stream is no longer at position 0 when the
    // Writer is constructed.
    out.write(123);

    try {
      writer = new Writer(out, BLOCK_SIZE, compression, comparator, conf);
      Assert.fail("Failed to catch file write not at position 0.");
    }
    catch (Exception e) {
      // noop, expecting exceptions
    }

  private void createTFile() throws IOException {
    long totalBytes = 0;
    FSDataOutputStream fout = createFSOutput(path, fs);
    try {
      Writer writer =
          new Writer(fout, options.minBlockSize, options.compress, "memcmp",
              conf);
      try {
        BytesWritable key = new BytesWritable();
        BytesWritable val = new BytesWritable();
        timer.start();
        for (long i = 0; true; ++i) {
          if (i % 1000 == 0) { // test the size for every 1000 rows.
            if (fs.getFileStatus(path).getLen() >= options.fileSize) {
              break;
            }
          }
          kvGen.next(key, val, false);
          writer.append(key.get(), 0, key.getSize(), val.get(), 0,
              val.getSize());
          totalBytes += key.getSize();
          totalBytes += val.getSize();
        }
        timer.stop();
      }
      finally {
        writer.close();
      }
    }
    finally {
      fout.close();
    }
  }
