this.context.getOutputFormatClass(), this.context.getConfiguration()));
} catch(ClassNotFoundException e1) {
throw new RuntimeException(e1);
}
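// The ProxyOutputCommitter exposes the main output's work directory, under
// which each named output will be written (see the output-dir redirection below).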
ProxyOutputCommitter baseOutputCommitter = (ProxyOutputCommitter) mainOutputFormat.getOutputCommitter(this.context);
// The trick is to create a fresh Job, with its own copy of the Configuration,
// for each named output, so per-output settings don't leak into the main job
Configuration c = new Configuration(this.context.getConfiguration());
Job job = new Job(c);
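// Note: the Job(Configuration) constructor is deprecated in Hadoop 2.x in
// favor of Job.getInstance(Configuration), but it works on both 1.x and 2.x.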
Class<?> keyClass = getNamedOutputKeyClass(this.context, baseFileName);
if(keyClass == null && getDefaultNamedOutputKeyClass(this.context) == null) {
throw new InvalidNamedOutputException("No named output pre-configured for '" + baseFileName
    + "' and no default named output specified.");
}
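// Fall back to the default named output key/value classes when none were
// configured specifically for this name.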
job.setOutputKeyClass(keyClass == null ? getDefaultNamedOutputKeyClass(this.context) : keyClass);
Class<?> valueClass = getNamedOutputValueClass(this.context, baseFileName);
job.setOutputValueClass(valueClass == null ? getDefaultNamedOutputValueClass(this.context)
: valueClass);
// Apply any output-specific context (extra Configuration properties) defined for this named output
setSpecificNamedOutputContext(this.context.getConfiguration(), job, baseFileName);
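// TaskAttemptContextFactory hides the Hadoop 1.x/2.x API difference
// (TaskAttemptContext became an interface in 2.x); its get() may throw a
// checked Exception, which we wrap into an IOException.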
TaskAttemptContext taskContext;
try {
taskContext = TaskAttemptContextFactory.get(job.getConfiguration(),
this.context.getTaskAttemptID());
} catch(Exception e) {
throw new IOException(e);
}
// Redirect the output dir for the new OutputFormat that we are about to
// create. We put it inside the main output's work path so that, if the Job
// fails, everything is discarded along with it.
String outputDir = baseOutputCommitter.getBaseDir() + "/" + baseFileName;
taskContext.getConfiguration().set("mapred.output.dir", outputDir);
// Hadoop 2.x reads the same setting from a different property name:
taskContext.getConfiguration().set("mapreduce.output.fileoutputformat.outputdir", outputDir);
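// Keep the freshly built TaskAttemptContext in this output's context so it
// can be reused for this named output.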
context.taskAttemptContext = taskContext;
// Load the serialized OutputFormat instance configured for this named output, if any
String outputFormatFile = getNamedOutputFormatInstanceFile(this.context, baseFileName);
if(outputFormatFile == null) {