}
// Main output is NullOutputFormat: every record is written through the per-category
// named outputs in the reducer below, so the default output path receives nothing.
job.setOutput(new Path(output), new HadoopOutputFormat(NullOutputFormat.class), ITuple.class, NullWritable.class);
// The reducer routes each tuple to the named output matching its Category,
// copying only the (line, text, title) fields into the output schema.
job.setTupleReducer(new TupleReducer<ITuple, NullWritable>() {

    // Reused across reduce() calls to avoid per-record allocation
    // (standard Hadoop/Pangool object-reuse pattern).
    ITuple outTuple = new Tuple(OUT_SCHEMA);

    @Override
    public void reduce(ITuple group, Iterable<ITuple> tuples, TupleMRContext context, Collector collector)
        throws IOException, InterruptedException, TupleMRException {
        for(ITuple tuple : tuples) {
            Category category = (Category) tuple.get("category");
            outTuple.set("line", tuple.get("line"));
            outTuple.set("text", tuple.get("text"));
            outTuple.set("title", tuple.get("title"));
            // Locale.ROOT makes the lowercasing locale-independent: with the default
            // locale (e.g. Turkish), "I" would lowercase to dotless "ı" and the record
            // would be routed to a wrong named output. Fully qualified because this
            // chunk cannot see (or safely edit) the file's import block.
            collector.getNamedOutput(category.toString().toLowerCase(java.util.Locale.ROOT))
                .write(outTuple, NullWritable.get());
        }
    }
});