// If not in cache, create a new one
if(context == null) {
  context = new OutputContext();
  OutputFormat mainOutputFormat;
  try {
    mainOutputFormat = ((OutputFormat) ReflectionUtils.newInstance(
        this.context.getOutputFormatClass(), this.context.getConfiguration()));
  } catch(ClassNotFoundException e1) {
    throw new RuntimeException(e1);
  }
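  // The job's main OutputFormat is expected to be the proxy variant, whose
  // committer exposes the base work directory (getBaseDir(), used below) that
  // every named output is written under.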
  ProxyOutputCommitter baseOutputCommitter = ((ProxyOutputCommitter) mainOutputFormat
      .getOutputCommitter(this.context));
  // The trick is to create a new Job for each output, so each one can carry
  // its own configuration
  Configuration c = new Configuration(this.context.getConfiguration());
  Job job = new Job(c);
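  // Note: new Job(c) is deprecated in Hadoop 2 in favor of Job.getInstance(c),
  // but it still works and keeps the code compatible with Hadoop 1.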
  Class<?> keyClass = getNamedOutputKeyClass(this.context, baseFileName);
  if(keyClass == null && getDefaultNamedOutputKeyClass(this.context) == null) {
    throw new InvalidNamedOutputException("No named output configured for '" + baseFileName
        + "' and no default named output specified.");
  }
  job.setOutputKeyClass(keyClass == null ? getDefaultNamedOutputKeyClass(this.context) : keyClass);
  Class<?> valueClass = getNamedOutputValueClass(this.context, baseFileName);
  job.setOutputValueClass(valueClass == null ? getDefaultNamedOutputValueClass(this.context)
      : valueClass);
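  // If no per-output key/value class was configured, the default named output's
  // classes apply. TaskAttemptContextFactory presumably papers over the
  // Hadoop 1 / Hadoop 2 API split (TaskAttemptContext is a class in 1.x and an
  // interface in 2.x), hence the reflective construction and the broad catch below.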
  // Apply any output-specific configuration set for this named output
  setSpecificNamedOutputContext(this.context.getConfiguration(), job, baseFileName);
  TaskAttemptContext taskContext;
  try {
    taskContext = TaskAttemptContextFactory.get(job.getConfiguration(),
        this.context.getTaskAttemptID());
  } catch(Exception e) {
    throw new IOException(e);
  }
  // Point the new OutputFormat at its own output dir, nested inside the main
  // output's work path so that everything is discarded together if the Job fails
  taskContext.getConfiguration().set("mapred.output.dir",
      baseOutputCommitter.getBaseDir() + "/" + baseFileName);
  // The same setting under its Hadoop 2.0 name:
  taskContext.getConfiguration().set("mapreduce.output.fileoutputformat.outputdir",
      baseOutputCommitter.getBaseDir() + "/" + baseFileName);
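  // Both keys are set because FileOutputFormat reads "mapred.output.dir" in
  // Hadoop 1 and "mapreduce.output.fileoutputformat.outputdir" in Hadoop 2.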
  context.taskAttemptContext = taskContext;
  // Load the OutputFormat instance
  String outputFormatFile = getNamedOutputFormatInstanceFile(this.context, baseFileName);
  if(outputFormatFile == null) {
    outputFormatFile = getDefaultNamedOutputFormatInstanceFile(this.context);
  }
  OutputFormat outputFormat = InstancesDistributor.loadInstance(
      context.taskAttemptContext.getConfiguration(), OutputFormat.class, outputFormatFile, true);
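  // InstancesDistributor deserializes an OutputFormat instance that was serialized
  // to a file at job-setup time; judging by the name, the file is shipped to
  // tasks via the distributed cache.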
  // We have to create a JobContext to meet the contract of the OutputFormat
  JobContext jobContext;
  try {
    jobContext = JobContextFactory.get(taskContext.getConfiguration(), taskContext.getJobID());
  } catch(Exception e) {
    throw new IOException(e);
  }
  context.jobContext = jobContext;
  // The OutputFormat contract requires checking the output specs first
  outputFormat.checkOutputSpecs(jobContext);
  // We get the output committer so we can call it later
  context.outputCommitter = outputFormat.getOutputCommitter(taskContext);
  // Save the RecordWriter to cache it
  context.recordWriter = outputFormat.getRecordWriter(taskContext);
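  // From here on, writes to this named output go through the cached writer;
  // the setup above only runs again on a cache miss.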
  // If counters are enabled, wrap the writer so that each written record
  // increments the corresponding counter
  if(countersEnabled) {
    context.recordWriter = new RecordWriterWithCounter(context.recordWriter, baseFileName,