Package org.apache.hadoop.hive.ql.io

Examples of org.apache.hadoop.hive.ql.io.RecordUpdater
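
The excerpts below exercise Hive's ACID write path. Identifiers such as conf, root, fs, inspector, of (an OrcOutputFormat), mgr (an ORC memory manager), the BUCKET constant, the values arrays, and the row classes MyRow and BigRow are fixtures defined outside the excerpts. As orientation, here is a minimal sketch of the lifecycle the examples share (write a base, write a delta, close), assuming the Hive 0.13/0.14-era API used throughout this page:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
    import org.apache.hadoop.hive.ql.io.RecordUpdater;
    import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

    public class RecordUpdaterLifecycle {
      // MyRow and inspector stand in for the fixtures used in the excerpts.
      static void writeBaseAndDelta(Configuration conf, Path root,
          ObjectInspector inspector, int bucket) throws IOException {
        OrcOutputFormat of = new OrcOutputFormat();

        // base file: writingBase(true); the max transaction id names base_0000100
        AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
            .inspector(inspector).bucket(bucket)
            .writingBase(true).maximumTransactionId(100);
        RecordUpdater ru = of.getRecordUpdater(root, options);
        ru.insert(0, new MyRow("v"));          // (currentTransaction, row)
        ru.close(false);                       // false = commit, true = abort

        // delta file: the min/max transaction ids name delta_0000200_0000200
        ru = of.getRecordUpdater(root, options.writingBase(false)
            .minimumTransactionId(200).maximumTransactionId(200));
        ru.update(200, 0, 0, new MyRow("u"));  // (currentTxn, originalTxn, rowId, row)
        ru.delete(200, 0, 1);                  // (currentTxn, originalTxn, rowId)
        ru.close(false);
      }
    }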


    // write the base
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .inspector(inspector).bucket(BUCKET);
    RecordUpdater ru = of.getRecordUpdater(root,
        options.writingBase(true).maximumTransactionId(100));
    for(String v: values) {
      ru.insert(0, new MyRow(v));
    }
    ru.close(false);

    // write a delta
    ru = of.getRecordUpdater(root, options.writingBase(false)
        .minimumTransactionId(200).maximumTransactionId(200));
    ru.update(200, 0, 0, new MyRow("update 1"));
    ru.update(200, 0, 2, new MyRow("update 2"));
    ru.update(200, 0, 3, new MyRow("update 3"));
    ru.delete(200, 0, 7);
    ru.delete(200, 0, 8);
    ru.close(false);

    ValidTxnList txnList = new ValidTxnListImpl("200:");
    AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList);

    assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory());
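
The assertion above depends on the ACID directory naming scheme: a base written with maximumTransactionId(100) lands in base_0000100, and the delta covering transactions 200..200 in delta_0000200_0000200. As a small sketch, the deltas that getAcidState considers current can be listed from the same Directory object (getCurrentDirectories() per this Hive version's AcidUtils):

    // list the deltas visible under the given transaction list
    for (AcidUtils.ParsedDelta delta : directory.getCurrentDirectories()) {
      System.out.println("delta: " + delta.getPath()); // e.g. .../delta_0000200_0000200
    }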

    // write a delta
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .writingBase(false).minimumTransactionId(1).maximumTransactionId(1)
        .bucket(BUCKET).inspector(inspector).filesystem(fs);
    RecordUpdater ru = of.getRecordUpdater(root, options);
    values = new String[]{"0.0", null, null, "1.1", null, null, null,
        "ignore.7"};
    for(int i=0; i < values.length; ++i) {
      if (values[i] != null) {
        ru.update(1, 0, i, new BigRow(i, i, values[i], i, i));
      }
    }
    ru.delete(100, 0, 9);
    ru.close(false);

    // write a delta
    options = options.minimumTransactionId(2).maximumTransactionId(2);
    ru = of.getRecordUpdater(root, options);
    values = new String[]{null, null, "1.0", null, null, null, null, "3.1"};
    for(int i=0; i < values.length; ++i) {
      if (values[i] != null) {
        ru.update(2, 0, i, new BigRow(i, i, values[i], i, i));
      }
    }
    ru.delete(100, 0, 8);
    ru.close(false);

    InputFormat inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.min.split.size", "1");
    job.set("mapred.max.split.size", "2");
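
Setting mapred.min.split.size and mapred.max.split.size to one and two bytes forces OrcInputFormat to cut as many splits as it can, and the readers it returns merge the base with the two deltas, applying the updates and deletes on the fly. A sketch of reading the merged rows back, following the reader pattern used later on this page (the string column of BigRow is assumed to be field 2):

    // iterate one split's merged rows
    InputSplit[] splits = inf.getSplits(job, 1);
    org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr =
        inf.getRecordReader(splits[0], job, Reporter.NULL);
    NullWritable key = rr.createKey();
    OrcStruct value = rr.createValue();
    while (rr.next(key, value)) {
      System.out.println(value.getFieldValue(2)); // merged row's string column
    }
    rr.close();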

    // write the base, routing explicit ORC writer options through the ACID options
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .writingBase(true).minimumTransactionId(0).maximumTransactionId(0)
        .bucket(BUCKET).inspector(inspector).filesystem(fs);
    options.orcOptions(OrcFile.writerOptions(conf)
      .stripeSize(1).blockPadding(false).compress(CompressionKind.NONE)
      .memory(mgr));
    RecordUpdater ru = of.getRecordUpdater(root, options);
    String[] values= new String[]{"ignore.1", "0.1", "ignore.2", "ignore.3",
        "2.0", "2.1", "3.0", "ignore.4", "ignore.5", "ignore.6"};
    for(int i=0; i < values.length; ++i) {
      ru.insert(0, new BigRow(i, i, values[i], i, i));
    }
    ru.close(false);

    // write a delta
    options.writingBase(false).minimumTransactionId(1).maximumTransactionId(1);
    ru = of.getRecordUpdater(root, options);
    values = new String[]{"0.0", null, null, "1.1", null, null, null,
        "ignore.7"};
    for(int i=0; i < values.length; ++i) {
      if (values[i] != null) {
        ru.update(1, 0, i, new BigRow(i, i, values[i], i, i));
      }
    }
    ru.delete(100, 0, 9);
    ru.close(false);

    // write a delta
    options.minimumTransactionId(2).maximumTransactionId(2);
    ru = of.getRecordUpdater(root, options);
    values = new String[]{null, null, "1.0", null, null, null, null, "3.1"};
    for(int i=0; i < values.length; ++i) {
      if (values[i] != null) {
        ru.update(2, 0, i, new BigRow(i, i, values[i], i, i));
      }
    }
    ru.delete(100, 0, 8);
    ru.close(false);

    InputFormat inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.min.split.size", "1");
    job.set("mapred.max.split.size", "2");
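
Here the base is written through explicit ORC writer options: a one-byte stripe size, no block padding, no compression, and an external memory manager (mgr), which together force the writer to flush very small stripes so that the tiny split sizes above can yield several splits. A sketch to inspect the resulting stripe count (assuming Reader.getStripes() as in this ORC version):

    // open the base bucket file and count its stripes
    Path basePath = AcidUtils.createFilename(root, options.writingBase(true)
        .minimumTransactionId(0).maximumTransactionId(0));
    Reader baseReader = OrcFile.createReader(basePath,
        new OrcFile.ReaderOptions(conf).filesystem(fs));
    System.out.println("stripes: " + baseReader.getStripes().size());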

    // write a delta
    AcidOutputFormat.Options options =
        new AcidOutputFormat.Options(conf)
            .bucket(BUCKET).inspector(inspector).filesystem(fs)
            .writingBase(false).minimumTransactionId(1).maximumTransactionId(1);
    RecordUpdater ru = of.getRecordUpdater(root, options);
    String[] values = new String[]{"a", "b", "c", "d", "e"};
    for(int i=0; i < values.length; ++i) {
      ru.insert(1, new MyRow(values[i]));
    }
    ru.close(false);

    // write a delta
    options.minimumTransactionId(2).maximumTransactionId(2);
    ru = of.getRecordUpdater(root, options);
    values = new String[]{"f", "g", "h", "i", "j"};
    for(int i=0; i < values.length; ++i) {
      ru.insert(2, new MyRow(values[i]));
    }
    ru.close(false);

    InputFormat inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.min.split.size", "1");
    job.set("mapred.max.split.size", "2");
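
Both deltas in this example carry only insert events, written under transactions 1 and 2, so they end up in delta_0000001_0000001 and delta_0000002_0000002, following the zero-padded naming seen with base_0000100 above. A sketch that lists them once both transactions are committed (ValidTxnListImpl string format as used earlier on this page):

    ValidTxnList txns = new ValidTxnListImpl("2:");   // everything up to txn 2 is valid
    AcidUtils.Directory dir = AcidUtils.getAcidState(root, conf, txns);
    for (AcidUtils.ParsedDelta d : dir.getCurrentDirectories()) {
      System.out.println(d.getPath());
    }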

    // write a base
    AcidOutputFormat.Options options =
        new AcidOutputFormat.Options(conf)
            .writingBase(true).minimumTransactionId(0).maximumTransactionId(0)
            .bucket(BUCKET).inspector(inspector).filesystem(fs);
    RecordUpdater ru = of.getRecordUpdater(root, options);
    String[] values= new String[]{"1", "2", "3", "4", "5"};
    for(int i=0; i < values.length; ++i) {
      ru.insert(0, new MyRow(values[i]));
    }
    ru.close(false);

    // write a delta
    options.writingBase(false).minimumTransactionId(10)
        .maximumTransactionId(19);
    ru = of.getRecordUpdater(root, options);
    values = new String[]{"6", "7", "8"};
    for(int i=0; i < values.length; ++i) {
      ru.insert(1, new MyRow(values[i]));
    }
    InputFormat inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.input.dir", root.toString());
    job.set("bucket_count", "2");

    // read the keys before the delta is flushed
    InputSplit[] splits = inf.getSplits(job, 1);
    assertEquals(2, splits.length);
    org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr =
        inf.getRecordReader(splits[0], job, Reporter.NULL);
    NullWritable key = rr.createKey();
    OrcStruct value = rr.createValue();
    System.out.println("Looking at split " + splits[0]);
    for(int i=1; i < 6; ++i) {
      System.out.println("Checking row " + i);
      assertEquals(true, rr.next(key, value));
      assertEquals(Integer.toString(i), value.getFieldValue(0).toString());
    }
    assertEquals(false, rr.next(key, value));
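
    // Rows "6".."8" were inserted into the still-open delta above but never
    // flushed, so the reader stops after the five base rows; a delta only
    // becomes visible once flush() records its length in the side file
    // (see the side-file handling in the next example).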

    ru.flush();
    ru.flush();
    values = new String[]{"9", "10"};
    for(int i=0; i < values.length; ++i) {
      ru.insert(3, new MyRow(values[i]));
    }
    ru.flush();

    splits = inf.getSplits(job, 1);
    assertEquals(2, splits.length);
    rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
    Path sideFile = new Path(root +

    // write a delta, flushing periodically so readers can see committed rows
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .filesystem(fs)
        .bucket(10)          // bucket 10, inferred from the getBucket assertions below
        .writingBase(false)
        .minimumTransactionId(10)
        .maximumTransactionId(19)
        .inspector(inspector)
        .reporter(Reporter.NULL);
    RecordUpdater updater = new OrcRecordUpdater(root, options);
    updater.insert(11, new MyRow("first"));
    updater.insert(11, new MyRow("second"));
    updater.insert(11, new MyRow("third"));
    updater.flush();
    updater.insert(12, new MyRow("fourth"));
    updater.insert(12, new MyRow("fifth"));
    updater.flush();
    Path bucketPath = AcidUtils.createFilename(root, options);
    Path sidePath = OrcRecordUpdater.getSideFile(bucketPath);
    DataInputStream side = fs.open(sidePath);

    // read the stopping point for the first flush and make sure we only see
    // 3 rows
    long len = side.readLong();
    Reader reader = OrcFile.createReader(bucketPath,
        new OrcFile.ReaderOptions(conf).filesystem(fs).maxLength(len));
    assertEquals(3, reader.getNumberOfRows());

    // read the second flush and make sure we see all 5 rows
    len = side.readLong();
    side.close();
    reader = OrcFile.createReader(bucketPath,
        new OrcFile.ReaderOptions(conf).filesystem(fs).maxLength(len));
    assertEquals(5, reader.getNumberOfRows());
    RecordReader rows = reader.rows();

    // check the contents of the file
    assertEquals(true, rows.hasNext());
    OrcStruct row = (OrcStruct) rows.next(null);
    assertEquals(OrcRecordUpdater.INSERT_OPERATION,
        OrcRecordUpdater.getOperation(row));
    assertEquals(11, OrcRecordUpdater.getCurrentTransaction(row));
    assertEquals(11, OrcRecordUpdater.getOriginalTransaction(row));
    assertEquals(10, OrcRecordUpdater.getBucket(row));
    assertEquals(0, OrcRecordUpdater.getRowId(row));
    assertEquals("first",
        OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(1, OrcRecordUpdater.getRowId(row));
    assertEquals(10, OrcRecordUpdater.getBucket(row));
    assertEquals("second",
        OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(2, OrcRecordUpdater.getRowId(row));
    assertEquals(10, OrcRecordUpdater.getBucket(row));
    assertEquals("third",
        OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(12, OrcRecordUpdater.getCurrentTransaction(row));
    assertEquals(12, OrcRecordUpdater.getOriginalTransaction(row));
    assertEquals(10, OrcRecordUpdater.getBucket(row));
    assertEquals(0, OrcRecordUpdater.getRowId(row));
    assertEquals("fourth",
        OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(1, OrcRecordUpdater.getRowId(row));
    assertEquals("fifth",
        OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(false, rows.hasNext());

    // add one more record and close
    updater.insert(20, new MyRow("sixth"));
    updater.close(false);
    reader = OrcFile.createReader(bucketPath,
        new OrcFile.ReaderOptions(conf).filesystem(fs));
    assertEquals(6, reader.getNumberOfRows());
    assertEquals(false, fs.exists(sidePath));
  }
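
The side file next to the bucket (the flush-length file) is what makes an open delta readable: each flush() appends one long giving the number of bytes that form a valid ORC prefix, readers cap the file at that length via maxLength, and close() finalizes the footer and deletes the side file, as the final assertions show. A sketch reading every recorded flush point, using the same calls as above:

    // dump every flush point recorded in the side file (one long per flush)
    DataInputStream in = fs.open(OrcRecordUpdater.getSideFile(bucketPath));
    try {
      while (true) {
        System.out.println("valid prefix: " + in.readLong() + " bytes");
      }
    } catch (java.io.EOFException end) {
      // reached the end of the side file
    } finally {
      in.close();
    }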

    // write update and delete events into a delta
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .filesystem(fs).bucket(BUCKET)   // assumed fixtures; chain head not shown in the excerpt
        .writingBase(false)
        .minimumTransactionId(100)
        .maximumTransactionId(100)
        .inspector(inspector)
        .reporter(Reporter.NULL);
    RecordUpdater updater = new OrcRecordUpdater(root, options);
    updater.update(100, 10, 30, new MyRow("update"));
    updater.delete(100, 40, 60);
    updater.close(false);
    Path bucketPath = AcidUtils.createFilename(root, options);

    Reader reader = OrcFile.createReader(bucketPath,
        new OrcFile.ReaderOptions(conf).filesystem(fs));
    assertEquals(2, reader.getNumberOfRows());
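
Each update() and delete() call writes a single event row into the delta, which is why the reader sees exactly two rows here. The event type can be recovered with getOperation(); INSERT_OPERATION appears earlier on this page, and UPDATE_OPERATION and DELETE_OPERATION are assumed to be its siblings on OrcRecordUpdater in this version:

    // classify the two event rows written above
    RecordReader events = reader.rows();
    while (events.hasNext()) {
      OrcStruct event = (OrcStruct) events.next(null);
      int op = OrcRecordUpdater.getOperation(event);
      if (op == OrcRecordUpdater.UPDATE_OPERATION) {        // assumed constant
        System.out.println("update, rowId " + OrcRecordUpdater.getRowId(event));
      } else if (op == OrcRecordUpdater.DELETE_OPERATION) { // assumed constant
        System.out.println("delete, rowId " + OrcRecordUpdater.getRowId(event));
      }
    }
    events.close();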

