new byte[0], "\\N".getBytes("UTF-8")};
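// Write an RCFile containing two 8-column rows, compressed with the default codec.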
RCFileOutputFormat.setColumnNumber(conf, 8);
RCFile.Writer writer = new RCFile.Writer(fs, conf, file, null,
    new DefaultCodec());
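// Wrap each column of record_1 in a BytesRefWritable and append it as the first row.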
BytesRefArrayWritable bytes = new BytesRefArrayWritable(record_1.length);
for (int i = 0; i < record_1.length; i++) {
  BytesRefWritable cu = new BytesRefWritable(record_1[i], 0,
      record_1[i].length);
  bytes.set(i, cu);
}
writer.append(bytes);
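// Do the same for record_2 as the second row.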
BytesRefArrayWritable bytes2 = new BytesRefArrayWritable(record_2.length);
for (int i = 0; i < record_2.length; i++) {
  BytesRefWritable cu = new BytesRefWritable(record_2[i], 0,
      record_2[i].length);
  bytes2.set(i, cu);
}
writer.append(bytes2);
writer.close();
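// Remember what was written so it can be compared with what the reader returns.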
BytesRefArrayWritable[] bytesArr = new BytesRefArrayWritable[] { bytes, bytes2 };
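// Configure the Howl RCFile input driver: input path, on-disk (original) schema,
// reordered output schema, driver properties, and a single partition value.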
RCFileInputDriver sd = new RCFileInputDriver();
JobContext jc = new JobContext(conf, new JobID());
sd.setInputPath(jc, file.toString());
InputFormat<?, ?> iF = sd.getInputFormat(null);
InputSplit split = iF.getSplits(jc).get(0);
sd.setOriginalSchema(jc, buildHiveSchema());
sd.setOutputSchema(jc, buildReorderedSchema());
sd.initialize(jc, getProps());
Map<String, String> map = new HashMap<String, String>(1);
map.put("part1", "first-part");
sd.setPartitionValues(jc, map);
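// Propagate the column-projection ids chosen by the driver into the conf the reader will use.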
conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
    jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
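// Create and initialize a record reader over the single split.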
TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
RecordReader<?, ?> rr = iF.createRecordReader(split, tac);
rr.initialize(split, tac);
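// Expected records after the driver reorders columns to match the output schema.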
HowlRecord[] tuples = getReorderedCols();
for (int j = 0; j < 2; j++) {
  Assert.assertTrue(rr.nextKeyValue());
  BytesRefArrayWritable w = (BytesRefArrayWritable) rr.getCurrentValue();
  // With column projection in effect, the row read back is not
  // byte-identical to the row that was written.
  Assert.assertFalse(bytesArr[j].equals(w));
  Assert.assertEquals(8, w.size());
  HowlRecord t = sd.convertToHowlRecord(null, w);
  Assert.assertEquals(7, t.size());
  Assert.assertEquals(tuples[j], t);
}
// Only two rows were written, so there is nothing left to read.
Assert.assertFalse(rr.nextKeyValue());