Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.OutputCommitter
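OutputCommitter defines MapReduce's output-commit protocol: setupJob and setupTask prepare job- and task-level output locations, needsTaskCommit/commitTask/abortTask promote or discard a task attempt's output, and commitJob/abortJob (plus the deprecated cleanupJob) finalize the job as a whole. As a baseline for the snippets below, here is a minimal sketch, not taken from any of these projects, that implements the five abstract methods as no-ops:

import java.io.IOException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// A committer with no side effects, usable when the OutputFormat writes
// directly to its final destination and nothing needs to be promoted.
public class NoOpOutputCommitter extends OutputCommitter {
  @Override
  public void setupJob(JobContext jobContext) throws IOException { }

  @Override
  public void setupTask(TaskAttemptContext taskContext) throws IOException { }

  @Override
  public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
    return false; // no per-task output to promote, so commitTask is never invoked
  }

  @Override
  public void commitTask(TaskAttemptContext taskContext) throws IOException { }

  @Override
  public void abortTask(TaskAttemptContext taskContext) throws IOException { }
}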


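From an HBase bulk-load importer: an HFileOutputFormat subclass whose committer delegates the per-task protocol to the base committer, bulk-loads the generated HFiles into the output table on commitJob, and cleans up its scratch directory whether the job commits, aborts, or is cleaned up.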

  private static class ImporterOutputFormat extends HFileOutputFormat {
    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context) throws IOException {
      final OutputCommitter baseOutputCommitter = super.getOutputCommitter(context);

      return new OutputCommitter() {
        @Override
        public void setupJob(JobContext jobContext) throws IOException {
          baseOutputCommitter.setupJob(jobContext);
        }

        @Override
        public void setupTask(TaskAttemptContext taskContext) throws IOException {
          baseOutputCommitter.setupTask(taskContext);
        }

        @Override
        public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
          return baseOutputCommitter.needsTaskCommit(taskContext);
        }

        @Override
        public void commitTask(TaskAttemptContext taskContext) throws IOException {
          baseOutputCommitter.commitTask(taskContext);
        }

        @Override
        public void abortTask(TaskAttemptContext taskContext) throws IOException {
          baseOutputCommitter.abortTask(taskContext);
        }

        @Override
        public void abortJob(JobContext jobContext, JobStatus.State state) throws IOException {
          try {
            baseOutputCommitter.abortJob(jobContext, state);
          } finally {
            cleanupScratch(jobContext);
          }
        }

        @Override
        public void commitJob(JobContext jobContext) throws IOException {
          try {
            baseOutputCommitter.commitJob(jobContext);
            Configuration conf = jobContext.getConfiguration();
            try {
              // bulk-load the generated HFiles into the target HBase table
              new LoadIncrementalHFiles(conf)
                .doBulkLoad(HFileOutputFormat.getOutputPath(jobContext),
                  new HTable(conf,
                    conf.get(HBaseConstants.PROPERTY_OUTPUT_TABLE_NAME_KEY)));
            } catch (Exception e) {
              throw new IOException("BulkLoad failed.", e);
            }
          } finally {
            cleanupScratch(jobContext);
          }
        }

        @Override
        public void cleanupJob(JobContext context) throws IOException {
          try {
            baseOutputCommitter.cleanupJob(context);
          } finally {
            cleanupScratch(context);
          }
        }
      };
    }
  }
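A NullOutputFormat subclass that returns a committer of pure no-ops; since needsTaskCommit returns false, the framework never calls commitTask.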


  private static class NullOutputFormat<K, V> extends
    org.apache.hadoop.mapreduce.lib.output.NullOutputFormat<K, V> {

    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
      return new OutputCommitter() {
        public void abortTask(TaskAttemptContext taskContext) {
        }

        public void cleanupJob(JobContext jobContext) {
        }
        public void commitTask(TaskAttemptContext taskContext) { }
        public boolean needsTaskCommit(TaskAttemptContext taskContext) { return false; }
        public void setupJob(JobContext jobContext) { }
        public void setupTask(TaskAttemptContext taskContext) { }
      };
    }
  }
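A test helper that drives the commit path directly: it builds a FileOutputCommitterContainer, commits the job, and then checks via the metastore client that the expected partition was published.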

    publishTest(job);
  }

  public void publishTest(Job job) throws Exception {
    OutputCommitter committer = new FileOutputCommitterContainer(job, null);
    committer.commitJob(job);

    Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
    assertNotNull(part);

    StorerInfo storer = InternalUtil.extractStorerInfo(part.getSd(), part.getParameters());
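Task commit with HCatalog dynamic partitioning: each dynamic partition gets its own record writer and storage driver, so after closing the writers, each underlying committer is asked individually whether its task output needs committing.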

      if (dynamicPartitioningUsed) {
        for (RecordWriter<? super WritableComparable<?>, ? super Writable> bwriter : baseDynamicWriters.values()) {
          bwriter.close(context);
        }
        for (HCatOutputStorageDriver osd : baseDynamicStorageDrivers.values()) {
          OutputCommitter baseOutputCommitter = osd.getOutputFormat().getOutputCommitter(context);
          if (baseOutputCommitter.needsTaskCommit(context)) {
            baseOutputCommitter.commitTask(context);
          }
        }
      } else {
        baseWriter.close(context);
      }
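The corresponding setup path: when a new dynamic-partition value is seen, a storage driver and record writer are created for it, setupTask is called on the underlying committer, and both objects are cached under the partition's hash code.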

         
          HCatOutputStorageDriver localOsd = createDynamicStorageDriver(dynamicPartValues);
          RecordWriter<? super WritableComparable<?>, ? super Writable> baseRecordWriter
            = localOsd.getOutputFormat().getRecordWriter(context);
          localOsd.setupOutputCommitterJob(context);
          OutputCommitter baseOutputCommitter = localOsd.getOutputFormat().getOutputCommitter(context);
          baseOutputCommitter.setupTask(context);
          prepareForStorageDriverOutput(localOsd, context);
          baseDynamicWriters.put(dynHashCode, baseRecordWriter);
          baseDynamicStorageDrivers.put(dynHashCode, localOsd);
        }
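An earlier variant of the same publish test, built on HCatOutputCommitter and exercising cleanupJob instead of commitJob.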

    publishTest(job);
  }

  public void publishTest(Job job) throws Exception {
    OutputCommitter committer = new HCatOutputCommitter(job, null);
    committer.cleanupJob(job);

    Partition part = client.getPartition(dbName, tblName, Arrays.asList("p1"));
    assertNotNull(part);

    StorerInfo storer = InitializeInput.extractStorerInfo(part.getSd(), part.getParameters());
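An OutputFormat for tests that discards output: checkOutputSpecs accepts anything, and the committer is a no-op whose needsTaskCommit returns false.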

  @Override
  public void checkOutputSpecs(JobContext context) { }
 
  @Override
  public OutputCommitter getOutputCommitter(TaskAttemptContext context) {
    return new OutputCommitter() {
      public void abortTask(TaskAttemptContext taskContext) { }
      public void cleanupJob(JobContext jobContext) { }
      public void commitTask(TaskAttemptContext taskContext) { }
      public boolean needsTaskCommit(TaskAttemptContext taskContext) {
        return false;
      }
      public void setupJob(JobContext jobContext) { }
      public void setupTask(TaskAttemptContext taskContext) { }
    };
  }
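From the Gridmix tests: a MapContext assembled by hand, with a CustomOutputCommitter standing in for a real committer, so a load-generating mapper can be exercised without a running cluster.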

    TaskAttemptID taskId = new TaskAttemptID();
    RecordReader<NullWritable, GridmixRecord> reader = new FakeRecordReader();

    LoadRecordGkGrWriter writer = new LoadRecordGkGrWriter();

    OutputCommitter committer = new CustomOutputCommitter();
    StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
    LoadSplit split = getLoadSplit();

    MapContext<NullWritable, GridmixRecord, GridmixKey, GridmixRecord> mapContext =
        new MapContextImpl<NullWritable, GridmixRecord, GridmixKey, GridmixRecord>(
            conf, taskId, reader, writer, committer, reporter, split);
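The reduce-side counterpart: a ReduceContextImpl wired with the same CustomOutputCommitter, a dummy reporter, and a fake raw comparator.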

    Counter counter = new GenericCounter();
    Counter inputValueCounter = new GenericCounter();
    LoadRecordWriter output = new LoadRecordWriter();

    OutputCommitter committer = new CustomOutputCommitter();

    StatusReporter reporter = new DummyReporter();
    RawComparator<GridmixKey> comparator = new FakeRawComparator();

    ReduceContext<GridmixKey, GridmixRecord, NullWritable, GridmixRecord> reduceContext =
        new ReduceContextImpl<GridmixKey, GridmixRecord, NullWritable, GridmixRecord>(
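A similar hand-built MapContext for the Gridmix sleep job, with map-output compression emulation enabled before the context is constructed.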

    CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
    conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
    TaskAttemptID taskId = new TaskAttemptID();
    FakeRecordLLReader reader = new FakeRecordLLReader();
    LoadRecordGkNullWriter writer = new LoadRecordGkNullWriter();
    OutputCommitter committer = new CustomOutputCommitter();
    StatusReporter reporter = new TaskAttemptContextImpl.DummyReporter();
    SleepSplit split = getSleepSplit();
    MapContext<LongWritable, LongWritable, GridmixKey, NullWritable> mapcontext =
        new MapContextImpl<LongWritable, LongWritable, GridmixKey, NullWritable>(
            conf, taskId, reader, writer, committer, reporter, split);
    Context context = new WrappedMapper<LongWritable, LongWritable, GridmixKey, NullWritable>()
        .getMapContext(mapcontext);
