Package org.apache.hadoop.mapreduce.task

Examples of org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
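TaskAttemptContextImpl is the concrete implementation of TaskAttemptContext in the new (org.apache.hadoop.mapreduce) API: it pairs a Configuration with a TaskAttemptID, and it is what OutputFormats, RecordReaders, and OutputCommitters receive at runtime. The excerpts below show the constructions that come up most often: factory helpers, dummy contexts for tests, and committer wiring in Sqoop, Tez, and Hive.

First, a minimal, self-contained sketch of the basic pattern, modeled on the dummy-context test helpers excerpted further down (the class name TaskAttemptContextExample is ours, not taken from any of the quoted projects):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TaskType;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    // Hypothetical demo class, not from any project quoted on this page.
    public class TaskAttemptContextExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Synthetic attempt id: tracker "jt", job 1, map task 0, attempt 0,
        // the same shape the test helpers below use.
        TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
        // Framework code often re-reads the attempt id from the configuration,
        // so record it there as well.
        conf.set(MRJobConfig.TASK_ATTEMPT_ID, tid.toString());
        TaskAttemptContext context = new TaskAttemptContextImpl(conf, tid);
        System.out.println(context.getTaskAttemptID()); // attempt_jt_0001_m_000000_0
      }
    }

The first excerpt is a factory helper that picks its constructor argument based on whether the caller's Configuration is actually a JobConf: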


    // When handed a JobConf, pass along a copy so the context carries a
    // JobConf rather than a plain Configuration.
    public static TaskAttemptContext createTaskAttemptContext(Configuration conf,
                                TaskAttemptID taskId) {
        if (conf instanceof JobConf) {
            return new TaskAttemptContextImpl(new JobConf(conf), taskId);
        } else {
            return new TaskAttemptContextImpl(conf, taskId);
        }
    }
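From a Sqoop unit test of the SQL Server upsert (update-mode export) support: the Configuration is assembled by hand, wrapped in a TaskAttemptContextImpl with a blank TaskAttemptID, and used to build the record writer whose generated MERGE statement the test then asserts on: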


    // Describe the export target: table, column list, update key, plus
    // SQL Server specifics (table hints and identity-insert).
    conf.set(DBConfiguration.OUTPUT_TABLE_NAME_PROPERTY, tableName);
    conf.set(DBConfiguration.OUTPUT_FIELD_NAMES_PROPERTY, StringUtils.join(columnNames, ','));
    conf.set(ExportJobBase.SQOOP_EXPORT_UPDATE_COL_KEY, StringUtils.join(updateKeyColumns, ','));
    conf.set(SQLServerManager.TABLE_HINTS_PROP, "NOLOCK");
    conf.set(SQLServerManager.IDENTITY_INSERT_PROP, "true");
    // A blank attempt id is enough for the record writer under test.
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    SqlServerUpsertOutputFormat outputFormat = new SqlServerUpsertOutputFormat();
    SqlServerUpsertRecordWriter recordWriter = outputFormat.new SqlServerUpsertRecordWriter(context);
    // The expected statement continues past the end of this excerpt.
    assertEquals("SET IDENTITY_INSERT #myTable ON " +
      "MERGE INTO #myTable AS _target USING ( VALUES ( ?, ?, ? ) ) AS _source ( FirstColumn, SecondColumn, ThirdColumn ) ON _source.FirstColumn = _target.FirstColumn" +
      "  WHEN MATCHED THEN UPDATE SET _target.SecondColumn = _source.SecondColumn, _target.ThirdColumn = _source.ThirdColumn" +
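From Apache Tez, obtaining an output committer: a TaskAttemptID is synthesized from the DAG's application id and attempt number (the opening of that constructor call falls outside this excerpt), and the committer comes reflectively from the job's configured OutputFormat: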

          // ... continuation of a new TaskAttemptID(...) call cut off above.
          context.getApplicationId().getId(),
          ((jobConf.getBoolean(MRConfig.IS_MAP_PROCESSOR, false) ?
              TaskType.MAP : TaskType.REDUCE)),
          0, context.getDAGAttemptNumber());

      TaskAttemptContext taskContext = new TaskAttemptContextImpl(jobConf,
          taskAttemptID);
      try {
        // Instantiate the configured OutputFormat reflectively and ask it for
        // the committer bound to this attempt.
        OutputFormat outputFormat = ReflectionUtils.newInstance(taskContext
            .getOutputFormatClass(), jobConf);
        committer = outputFormat.getOutputCommitter(taskContext);
      } catch (Exception e) {
        throw new TezUncheckedException(e);
      }
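Also from Tez, the task-recovery path: the same id-building pattern yields a context that is handed straight to OutputCommitter.recoverTask():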

        // ... continuation of a new TaskAttemptID(...) call cut off above.
        + String.valueOf(context.getVertexIndex()),
        context.getApplicationId().getId(),
        ((jobConf.getBoolean(MRConfig.IS_MAP_PROCESSOR, false) ?
            TaskType.MAP : TaskType.REDUCE)),
        taskIndex, attemptId);
    TaskAttemptContext taskContext = new TaskAttemptContextImpl(jobConf,
        taskAttemptID);
    // Let the committer restore whatever state the recovered attempt needs.
    committer.recoverTask(taskContext);
  }
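A multi-threaded input sampler (the pattern used by TeraSort-style partition sampling, though the exact source is not identified in this excerpt): each daemon thread builds its own context around a blank attempt id so it can open and initialize a RecordReader on its sampled split: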

          setDaemon(true);
        }
        public void run() {
          long records = 0;
          try {
            // Each sampler thread gets its own context; a blank attempt id
            // suffices because no real task is running.
            TaskAttemptContext context = new TaskAttemptContextImpl(
              job.getConfiguration(), new TaskAttemptID());
            RecordReader<Text, Text> reader =
              inFormat.createRecordReader(splits.get(sampleStep * idx),
              context);
            reader.initialize(splits.get(sampleStep * idx), context);
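A committer-selection branch that supports both MapReduce APIs: with the new API, a dummy map-attempt context lets the configured OutputFormat supply its committer; with the old API, the committer class is read straight from mapred.output.committer.class: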

        // New-API path: derive a map attempt id and let the OutputFormat
        // produce the committer.
        org.apache.hadoop.mapreduce.TaskID taskId =
          new org.apache.hadoop.mapreduce.TaskID(jobId, TaskType.MAP, 0);
        org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID =
          new org.apache.hadoop.mapreduce.TaskAttemptID(taskId, 0);
        org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
          new TaskAttemptContextImpl(conf, taskAttemptID);
        OutputFormat outputFormat =
          ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), conf);
        committer = outputFormat.getOutputCommitter(taskContext);
      } else {
        // Old-API path: instantiate the committer class named in the config.
        committer = ReflectionUtils.newInstance(conf.getClass(
          "mapred.output.committer.class", FileOutputCommitter.class,
          org.apache.hadoop.mapred.OutputCommitter.class), conf);
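From Hive's table-reading path: when there is no surrounding task attempt (for example, a read issued through CliDriver), the code falls back to a blank TaskAttemptID and overrides progress() so heartbeats reach the caller's Progressable: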

    if (taskAttemptId == null) {
      // If the caller is not inside a mapper/reducer (e.g. reading the table
      // via CliDriver), TaskAttemptID.forName() may return null; fall back to
      // the default constructor.
      taskAttemptId = new TaskAttemptID();
    }
    // Forward progress() to the supplied Progressable so long-running reads
    // keep reporting liveness.
    return new TaskAttemptContextImpl(conf, taskAttemptId) {
      @Override
      public void progress() {
        progressable.progress();
      }
    };
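A test utility that manufactures a dummy map-task context, recording the attempt id in the configuration just as a real task would: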

  public static TaskAttemptContext createDummyMapTaskAttemptContext(
      Configuration conf) {
    TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
    // Framework code looks the attempt id up in the config, so set it there.
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, tid.toString());
    return new TaskAttemptContextImpl(conf, tid);
  }
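An OutputFormat that runs job setup eagerly inside checkOutputSpecs(), using a dummy JOB_SETUP attempt id to reach the committer: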

    public void checkOutputSpecs(JobContext job)
        throws FileAlreadyExistsException, IOException {
      super.checkOutputSpecs(job);
      // A dummy JOB_SETUP attempt id is enough to obtain the committer and
      // drive job setup.
      TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.JOB_SETUP, 0, 0);
      getOutputCommitter(new TaskAttemptContextImpl(job.getConfiguration(), tid)).
        setupJob(job);
    }
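A FileOutputCommitter test harness: job and task contexts are derived from one Configuration, and the committer is then driven through its setup calls (outDir, attempt, and taskID are defined earlier in the full test):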

    Job job = Job.getInstance();
    FileOutputFormat.setOutputPath(job, outDir);
    Configuration conf = job.getConfiguration();
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

    // setup: job-level first, then the task attempt
    committer.setupJob(jContext);
    committer.setupTask(tContext);