Package org.apache.hadoop.mapreduce.task

Examples of org.apache.hadoop.mapreduce.task.JobContextImpl
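
JobContextImpl is the concrete JobContext of the new (org.apache.hadoop.mapreduce) API: it wraps a Configuration and a JobID and exposes read-only views of job settings such as the output format, the committer flags, and the number of reduce tasks. The excerpts below are truncated fragments from the Hadoop code base. As a self-contained starting point, a minimal construction looks roughly like this sketch (class and identifier names are illustrative, not taken from the excerpts):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.task.JobContextImpl;

    public class JobContextImplExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        JobID jobId = new JobID("local-jt", 1);   // illustrative identifiers
        JobContext jobContext = new JobContextImpl(conf, jobId);
        // The context is a read-only view of the job configuration:
        System.out.println("setup/cleanup needed: " + jobContext.getJobSetupCleanupNeeded());
        System.out.println("reduce tasks: " + jobContext.getNumReduceTasks());
      }
    }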


                      String.valueOf(options.getMaxMaps()));
    CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
            new Path(cluster.getFileSystem().getUri().toString()
                    +"/tmp/testDynInputFormat/fileList.seq"), options);

    // Drive DynamicInputFormat through the new API: the JobContextImpl carries
    // the configuration (and the listing built above) into getSplits().
    JobContext jobContext = new JobContextImpl(configuration, new JobID());
    DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
        new DynamicInputFormat<Text, CopyListingFileStatus>();
    List<InputSplit> splits = inputFormat.getSplits(jobContext);

    int nFiles = 0;
    int taskId = 0;

    for (InputSplit split : splits) {
      // For each split, create a reader and initialize it against a stub task context.
      RecordReader<Text, CopyListingFileStatus> recordReader =
          inputFormat.createRecordReader(split, null);
      StubContext stubContext = new StubContext(jobContext.getConfiguration(),
                                                recordReader, taskId);
      final TaskAttemptContext taskAttemptContext = stubContext.getContext();

      recordReader.initialize(splits.get(0), taskAttemptContext);
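
      // Hedged continuation, not part of the excerpt above: a typical next step is
      // to drain each reader and tally the records its split yields.
      while (recordReader.nextKeyValue()) {
        CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
        System.out.println(recordReader.getCurrentKey() + " -> " + fileStatus.getPath());
        nFiles++;
      }
      taskId++;
    }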


      throws IOException {
    boolean isSupported = false;
    JobContext _jobContext;
    if (committer != null) {
      // Build a job context that matches the committer API in use (new vs. old).
      if (newApiCommitter) {
        _jobContext = new JobContextImpl(
            getConfig(), TypeConverter.fromYarn(getJobId()));
      } else {
        _jobContext = new org.apache.hadoop.mapred.JobContextImpl(
            new JobConf(getConfig()), TypeConverter.fromYarn(getJobId()));
      }

        }

        checkTaskLimits();

        if (job.newApiCommitter) {
          // The job keeps a single context, built with the API that matches its committer.
          job.jobContext = new JobContextImpl(job.conf,
              job.oldJobId);
        } else {
          job.jobContext = new org.apache.hadoop.mapred.JobContextImpl(
              job.conf, job.oldJobId);
        }

    }

    @Override
    public JobContext createJobContext(Configuration conf,
                                       JobID jobId) {
      return new JobContextImpl(
          conf instanceof JobConf ? new JobConf(conf) : conf, jobId);
    }

    return new TaskAttemptID(jobId.getJtIdentifier(), jobId.getId(),
        isMap ? TaskType.MAP : TaskType.REDUCE, taskId, id);
  }

  @Override
  public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
    return new JobContextImpl(job.getConfiguration(), job.getJobID());
  }


      this.memoryPerMap = conf.getMemoryForMapTask();
      this.memoryPerReduce = conf.getMemoryForReduceTask();

      this.taskCompletionEvents = new ArrayList<TaskCompletionEvent>(
          numMapTasks + numReduceTasks + 10);
      // A JobContextImpl is consulted once, up front, for the committer-related flags.
      JobContext jobContext = new JobContextImpl(conf, jobId);
      this.jobSetupCleanupNeeded = jobContext.getJobSetupCleanupNeeded();
      this.taskCleanupNeeded = jobContext.getTaskCleanupNeeded();

      // Construct the jobACLs
      status.setJobACLs(jobtracker.getJobACLsManager().constructJobACLs(conf));

      this.mapFailuresPercent = conf.getMaxMapTaskFailuresPercent();
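
Both flags consulted through the JobContextImpl above are plain configuration properties, so a client can set them before submission and the context simply reads them back. A minimal sketch, assuming the standard Hadoop 2 property names (the key strings are an assumption, not shown on this page):

    Configuration conf = new Configuration();
    // Assumed key names for the two committer-related flags:
    conf.setBoolean("mapreduce.job.committer.setup.cleanup.needed", false);
    conf.setBoolean("mapreduce.job.committer.task.cleanup.needed", false);
    JobContext ctx = new JobContextImpl(conf, new JobID("local-jt", 2));
    System.out.println(ctx.getJobSetupCleanupNeeded());   // false
    System.out.println(ctx.getTaskCleanupNeeded());       // false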

  public void testCommitter() throws Exception {
    Job job = Job.getInstance();
    FileOutputFormat.setOutputPath(job, outDir);
    Configuration conf = job.getConfiguration();
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

    // setup
    committer.setupJob(jContext);
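    // Hedged sketch of how the commit protocol typically continues from here
    // (not part of the excerpt): task setup, task commit, then the job commit.
    committer.setupTask(tContext);
    // ... the task would write its output through the OutputFormat here ...
    if (committer.needsTaskCommit(tContext)) {
      committer.commitTask(tContext);
    }
    committer.commitJob(jContext);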

  public void testAbort() throws IOException, InterruptedException {
    Job job = Job.getInstance();
    FileOutputFormat.setOutputPath(job, outDir);
    Configuration conf = job.getConfiguration();
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

    // do setup
    committer.setupJob(jContext);

    Configuration conf = job.getConfiguration();
    // Register a FileSystem whose delete() fails, to exercise the abort/cleanup error path.
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "faildel:///");
    conf.setClass("fs.faildel.impl", FakeFileSystem.class, FileSystem.class);
    conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
    FileOutputFormat.setOutputPath(job, outDir);
    JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
    TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
    FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

    // do setup
    committer.setupJob(jContext);
