Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.OutputFormat
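An OutputFormat describes the output specification of a MapReduce job: it validates the job's output configuration (checkOutputSpecs), supplies the RecordWriter that writes the task's key/value pairs, and supplies the OutputCommitter that manages setup and commit of task and job output. As orientation for the excerpts below, here is a minimal sketch of that contract in one place; it assumes a Hadoop 2.x classpath and a TextOutputFormat writing Text/IntWritable pairs, and the class and variable names are illustrative rather than taken from any excerpt.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.OutputFormat;
    import org.apache.hadoop.mapreduce.RecordWriter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    import org.apache.hadoop.mapreduce.task.JobContextImpl;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    public class OutputFormatContractSketch {
      public static void writeOneRecord(Configuration conf) throws Exception {
        // FileOutputFormat's spec check requires an output directory.
        conf.set("mapreduce.output.fileoutputformat.outputdir", "/tmp/sketch-out");

        TaskAttemptContext ctx =
            new TaskAttemptContextImpl(conf, new TaskAttemptID());
        JobContext jobCtx = new JobContextImpl(conf, ctx.getJobID());

        OutputFormat<Text, IntWritable> format =
            new TextOutputFormat<Text, IntWritable>();
        format.checkOutputSpecs(jobCtx);          // 1. validate the output spec

        OutputCommitter committer = format.getOutputCommitter(ctx);
        committer.setupTask(ctx);                 // 2. prepare the task's work area

        RecordWriter<Text, IntWritable> writer = format.getRecordWriter(ctx);
        try {
          writer.write(new Text("answer"), new IntWritable(42)); // 3. write pairs
        } finally {
          writer.close(ctx);
        }

        if (committer.needsTaskCommit(ctx)) {     // 4. promote the task's output
          committer.commitTask(ctx);
        }
      }
    }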


Writing records, then committing the task through the OutputFormat's committer:

      // ... (excerpt truncated: the RecordWriter is obtained and earlier
      // records are written above this point)
      theRecordWriter.write(key2, val2);
    } finally {
      // Close the writer even if a write fails.
      theRecordWriter.close(tContext);
    }

    // Instantiate the job's OutputFormat reflectively and promote the
    // task's output via its OutputCommitter.
    OutputFormat outputFormat = ReflectionUtils.newInstance(
        tContext.getOutputFormatClass(), conf);
    OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
    committer.commitTask(tContext);
  }
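
One hedged refinement: Hadoop's own task runner only commits when the committer asks for it, so a guarded version of the excerpt's last step would look like this (same committer and tContext as above):

    if (committer.needsTaskCommit(tContext)) {
      committer.commitTask(tContext);
    }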


Creating the OutputCommitter inside the MapReduce application master; on the
new-API path the committer comes from the job's OutputFormat:

      org.apache.hadoop.mapreduce.v2.api.records.TaskId taskID = MRBuilderUtils
          .newTaskId(jobId, 0, TaskType.MAP);
      org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = MRBuilderUtils
          .newTaskAttemptId(taskID, 0);
      // Wrap the YARN attempt id in a TaskAttemptContext so the
      // OutputFormat can be instantiated against it.
      TaskAttemptContext taskContext = new TaskAttemptContextImpl(conf,
          TypeConverter.fromYarn(attemptID));
      OutputFormat outputFormat;
      try {
        outputFormat = ReflectionUtils.newInstance(taskContext
            .getOutputFormatClass(), conf);
        committer = outputFormat.getOutputCommitter(taskContext);
      } catch (Exception e) {
        throw new YarnException(e);
      }
    } else {
      // Old-API path: the committer class is read from the configuration.
      committer = ReflectionUtils.newInstance(conf.getClass(
          // ... (excerpt truncated)
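
Whichever branch produces the committer, the application master then drives the job-scoped half of the commit protocol with it. A sketch of those calls (jobContext and jobSucceeded are hypothetical, not from this excerpt):

      committer.setupJob(jobContext);        // before any task runs
      // ... tasks execute and commit their own output via commitTask() ...
      if (jobSucceeded) {
        committer.commitJob(jobContext);     // finalize the job's output
      } else {
        committer.abortJob(jobContext, JobStatus.State.FAILED);
      }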

Building and caching a per-named-output context (TaskAttemptContext, JobContext,
OutputCommitter and RecordWriter) so that each named output gets its own
OutputFormat instance:

    // If not in cache, create a new one
    if(context == null) {

      context = new OutputContext();

      OutputFormat mainOutputFormat;

      try {
        mainOutputFormat = ((OutputFormat) ReflectionUtils.newInstance(
            this.context.getOutputFormatClass(), this.context.getConfiguration()));
      } catch(ClassNotFoundException e) {
        throw new RuntimeException(e);
      }

      ProxyOutputCommitter baseOutputCommitter = ((ProxyOutputCommitter) mainOutputFormat
          .getOutputCommitter(this.context));

      // The trick is to create a new Job (and thus a fresh Configuration)
      // for each named output
      Job job = new Job(this.context.getConfiguration());
      job.setOutputKeyClass(getNamedOutputKeyClass(this.context, baseFileName));
      job.setOutputValueClass(getNamedOutputValueClass(this.context, baseFileName));
      // Apply any output-specific context configured for this named output
      setSpecificNamedOutputContext(this.context.getConfiguration(), job, baseFileName);
      TaskAttemptContext taskContext = new TaskAttemptContext(job.getConfiguration(),
          this.context.getTaskAttemptID());

      // Redirect the output directory of the OutputFormat we are about to
      // create into the main output's work path, so that if the Job fails
      // everything under it is discarded together
      taskContext.getConfiguration().set("mapred.output.dir",
          baseOutputCommitter.getBaseDir() + "/" + baseFileName);
      context.taskAttemptContext = taskContext;

      // Load the OutputFormat instance for this named output
      OutputFormat outputFormat = InstancesDistributor.loadInstance(
          context.taskAttemptContext.getConfiguration(), OutputFormat.class,
          getNamedOutputFormatInstanceFile(this.context, baseFileName), true);
      // The OutputFormat contract requires a JobContext for checkOutputSpecs
      JobContext jobContext = new JobContext(taskContext.getConfiguration(),
          taskContext.getJobID());
      context.jobContext = jobContext;
      // Honor the contract: validate the output specs first
      outputFormat.checkOutputSpecs(jobContext);
      // Keep the output committer so it can be driven at task end
      context.outputCommitter = outputFormat.getOutputCommitter(taskContext);
      // Cache the RecordWriter
      context.recordWriter = outputFormat.getRecordWriter(taskContext);

      // If counters are enabled, wrap the writer so each write also
      // increments the named output's counter
      if(countersEnabled) {
        context.recordWriter = new RecordWriterWithCounter(context.recordWriter,
            // ... (excerpt truncated)
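
Once cached, the OutputContext is consumed on the write path and torn down at task end. A sketch of that consumption, under the assumption that all cached contexts live in a Map (the map and the loop are hypothetical; only the field names mirror the excerpt):

      // Write path: records for a named output go through its cached writer.
      context.recordWriter.write(key, value);

      // Task end: close every cached writer and commit its output.
      for (OutputContext cached : cachedContexts.values()) {
        cached.recordWriter.close(cached.taskAttemptContext);
        if (cached.outputCommitter.needsTaskCommit(cached.taskAttemptContext)) {
          cached.outputCommitter.commitTask(cached.taskAttemptContext);
        }
      }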

Pinning the RecordWriter's work file to a known local file for tests by
overriding getDefaultWorkFile (Thrift variant):

  @SuppressWarnings("unchecked")
  private static RecordWriter<Writable, Writable>
  createThriftWriter(Class<?> thriftClass, final File file)
                    throws IOException, InterruptedException {

    // Anonymous subclass so the writer lands exactly in `file`
    // instead of the usual task work directory.
    OutputFormat outputFormat =
      new RCFileThriftOutputFormat(ThriftUtils.getTypeRef(thriftClass.getName())) {
        @Override
        public Path getDefaultWorkFile(TaskAttemptContext context,
            String extension) throws IOException {
          return new Path(file.toURI().toString());
        }
      };

    Configuration conf = new Configuration();
    // TODO: figure out why Gzip or BZip2 compression fails on OSX
    //conf.setBoolean("mapred.output.compress", true);
    //conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.BZip2Codec");

    return outputFormat.getRecordWriter(
        HadoopCompat.newTaskAttemptContext(conf, new TaskAttemptID()));
  }

The same pattern for Protocol Buffers records:

  @SuppressWarnings("unchecked")
  private static RecordWriter<Writable, Writable>
  createProtoWriter(Class<?> protoClass, final File file)
                    throws IOException, InterruptedException {

    // Anonymous subclass so the writer lands exactly in `file`
    // instead of the usual task work directory.
    OutputFormat outputFormat =
      new RCFileProtobufOutputFormat(Protobufs.getTypeRef(protoClass.getName())) {
        @Override
        public Path getDefaultWorkFile(TaskAttemptContext context,
            String extension) throws IOException {
          return new Path(file.toURI().toString());
        }
      };

    Configuration conf = new Configuration();
    // TODO: figure out why Gzip or BZip2 compression fails on OSX
    // conf.setBoolean("mapred.output.compress", true);
    // conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.BZip2Codec");

    return outputFormat.getRecordWriter(
        HadoopCompat.newTaskAttemptContext(conf, new TaskAttemptID()));
  }
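
A possible way to exercise these factories in a test; everything here is hypothetical (in particular, the key/value classes the RCFile writers accept are defined by elephant-bird's output formats, so the NullWritable key and ProtobufWritable value are assumptions):

    RecordWriter<Writable, Writable> writer =
        createProtoWriter(MyMessage.class, new File("/tmp/test.rcfile"));
    TaskAttemptContext ctx =
        HadoopCompat.newTaskAttemptContext(new Configuration(), new TaskAttemptID());
    try {
      ProtobufWritable<MyMessage> value = ProtobufWritable.newInstance(MyMessage.class);
      value.set(message);                      // `message` is a hypothetical MyMessage
      writer.write(NullWritable.get(), value); // key handling depends on the format
    } finally {
      writer.close(ctx);
    }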
