ru = of.getRecordUpdater(root, options);
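// write rows 6-8 into bucket 1; they stay in the open, unflushed delta for now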
values = new String[]{"6", "7", "8"};
for(int i=0; i < values.length; ++i) {
  ru.insert(1, new MyRow(values[i]));
}
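// set up an OrcInputFormat job over the table root, declaring two buckets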
InputFormat inf = new OrcInputFormat();
JobConf job = new JobConf();
job.set("mapred.input.dir", root.toString());
job.set("bucket_count", "2");
// read the keys before the delta is flushed
InputSplit[] splits = inf.getSplits(job, 1);
assertEquals(2, splits.length);
org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr =
    inf.getRecordReader(splits[0], job, Reporter.NULL);
NullWritable key = rr.createKey();
OrcStruct value = rr.createValue();
System.out.println("Looking at split " + splits[0]);
for(int i=1; i < 6; ++i) {
  System.out.println("Checking row " + i);
  assertEquals(true, rr.next(key, value));
  assertEquals(Integer.toString(i), value.getFieldValue(0).toString());
}
assertEquals(false, rr.next(key, value));
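// flush the open delta so the rows written so far become readable;
// the immediate second flush presumably exercises that a repeated flush is harmless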
ru.flush();
ru.flush();
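// add two more rows to bucket 3 in the same delta before the final flush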
values = new String[]{"9", "10"};
for(int i=0; i < values.length; ++i) {
  ru.insert(3, new MyRow(values[i]));
}
ru.flush();
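// re-plan the splits now that the delta has been flushed again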
splits = inf.getSplits(job, 1);
assertEquals(2, splits.length);
rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
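// the flush should have left a *_flush_length side file next to the delta bucket,
// recording how much of the still-open delta is safe to read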
Path sideFile = new Path(root +
    "/delta_0000010_0000019/bucket_00001_flush_length");
assertEquals(true, fs.exists(sideFile));
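// 24 bytes presumably corresponds to one 8-byte length entry per flush (three flushes so far)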
assertEquals(24, fs.getFileStatus(sideFile).getLen());