// Write a stream of tuples to a temp file in Pig's internal (Inter)
// record format, so they can be read back and verified below.
File temp = File.createTempFile("tmp", "tmp");
temp.deleteOnExit();
FileOutputStream fos = new FileOutputStream(temp);
DataOutputStream dos = new DataOutputStream(fos);
InterRecordWriter writer = new InterRecordWriter(dos);
// We add these lines because part of the InterStorage logic
// is the ability to seek to the next Tuple based on a magic set
// of bytes. This emulates the random bytes that will be present
// at the beginning of a split.
for (int i = 0; i < 6; i++) {
    dos.writeByte(r.nextInt());
}
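// For illustration only (a sketch, not Pig's actual implementation):
// resynchronizing on a marker amounts to scanning byte-by-byte until
// the magic sequence lines up, roughly
//
//   int matched = 0;
//   while (matched < MAGIC.length) {          // MAGIC is hypothetical here
//       int b = in.read();
//       if (b == -1) return false;            // EOF: no more records
//       matched = (b == (MAGIC[matched] & 0xff)) ? matched + 1 : 0;
//   }
//
// so the junk bytes written above are simply consumed and discarded.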
for (int i = 0; i < sz; i++) {
    // Build a tuple of the schema under test, fill it with random
    // data, and remember it so the read side can be checked against it.
    SchemaTuple<?> st = (SchemaTuple<?>) tf.newTuple();
    fillWithData(st);
    writer.write(null, st);
    written.add(st);
    // Interleave a few random bytes after each record so the reader
    // must resynchronize on the marker between tuples.
    for (int j = 0; j < 3; j++) {
        dos.writeByte(r.nextInt());
    }
}
writer.close(null);
// Read the file back through Hadoop's local filesystem, so no cluster
// is needed.
Configuration conf = new Configuration();
conf.set("fs.default.name", "file:///");
// Arguments: jtIdentifier, jobId, isMap, taskId, attemptId.
TaskAttemptID taskId = HadoopShims.createTaskAttemptID("jt", 1, true, 1, 1);
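// A minimal sketch of the read-back side, assuming InterRecordReader
// follows the standard Hadoop RecordReader lifecycle (initialize /
// nextKeyValue / getCurrentValue); construction details may vary by
// Pig version. The reader should skip past the random bytes and
// return exactly the tuples in `written`.
InterRecordReader reader = new InterRecordReader();
FileSplit split = new FileSplit(new Path(temp.getAbsolutePath()), 0, temp.length(), null);
reader.initialize(split, HadoopShims.createTaskAttemptContext(conf, taskId));
List<Tuple> read = new ArrayList<Tuple>();
while (reader.nextKeyValue()) {
    read.add((Tuple) reader.getCurrentValue());
}
reader.close();
assertEquals(written, read);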