Package org.apache.hadoop.mapreduce.v2.app.job.event

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskEvent
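JobTaskEvent pairs a TaskId with the task's final TaskState, and the tests below hand it to the job's event handler when a task finishes. A minimal sketch of how one is built and read back (the jobId variable is assumed; the constructor and getters match the usages shown below):

  JobTaskEvent event =
      new JobTaskEvent(MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP),
          TaskState.SUCCEEDED);
  TaskId completedTask = event.getTaskID();  // which task reached a terminal state
  TaskState finalState = event.getState();   // SUCCEEDED, FAILED, or KILLED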



  private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
      TaskAttemptImpl taskAttempt) {
    TaskId taskId = taskAttempt.attemptId.getTaskId();
    JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
    updateMillisCounters(jce, taskAttempt);
    return jce;
  }
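
The factory above is only half of the flow; the returned event still has to be dispatched. A hedged sketch of that call site, assuming the surrounding TaskAttemptImpl exposes an eventHandler field:

      // Dispatch the counter update to the job once the attempt has succeeded.
      taskAttempt.eventHandler.handle(
          createJobCounterUpdateEventTASucceeded(taskAttempt));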


    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    Map<TaskId,Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.FAILED,
        task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts =
        tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts,
        attempts.size());
    for (TaskAttempt attempt : attempts.values()) {

    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
        .next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
        .size());

    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
        .next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
        .size());
    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt,
        TaskAttemptStateInternal.ASSIGNED);
    app.getDispatcher().getEventHandler().handle(
        new TaskAttemptEvent(attempt.getID(),
            TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
  }

    @Override
    public JobStateInternal transition(JobImpl job, JobEvent event) {
      job.completedTaskCount++;
      LOG.info("Num completed Tasks: " + job.completedTaskCount);
      JobTaskEvent taskEvent = (JobTaskEvent) event;
      Task task = job.tasks.get(taskEvent.getTaskID());
      if (taskEvent.getState() == TaskState.SUCCEEDED) {
        taskSucceeded(job, task);
      } else if (taskEvent.getState() == TaskState.FAILED) {
        taskFailed(job, task);
      } else if (taskEvent.getState() == TaskState.KILLED) {
        taskKilled(job, task);
      }

      return checkJobAfterTaskCompletion(job);
    }
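
For context, the producer side of this transition is a task reporting its terminal state to the job with the same event type. A hedged sketch of that dispatch, assuming eventHandler and taskId fields on the task implementation:

      // Notify the job; the transition above branches on getState() to
      // taskSucceeded, taskFailed, or taskKilled.
      eventHandler.handle(new JobTaskEvent(taskId, TaskState.SUCCEEDED));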

    commitHandler.init(conf);
    commitHandler.start();
    JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);

    //Fail one task. This should land the JobImpl in the FAIL_WAIT state
    job.handle(new JobTaskEvent(
      MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
      TaskState.FAILED));
    //Verify abort job hasn't been called
    Mockito.verify(committer, Mockito.never())
      .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
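
A hedged continuation of this scenario: reporting the job's remaining map task as killed should let it leave FAIL_WAIT and move on to the abort sequence (the task index below is an assumption, not part of the original test):

    job.handle(new JobTaskEvent(
        MRBuilderUtils.newTaskId(job.getID(), 0, TaskType.MAP),
        TaskState.KILLED));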

  private static void completeJobTasks(JobImpl job) {
    // complete the map tasks and the reduce tasks so we start committing
    int numMaps = job.getTotalMaps();
    for (int i = 0; i < numMaps; ++i) {
      job.handle(new JobTaskEvent(
          MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
          TaskState.SUCCEEDED));
      Assert.assertEquals(JobState.RUNNING, job.getState());
    }
    int numReduces = job.getTotalReduces();
    for (int i = 0; i < numReduces; ++i) {
      job.handle(new JobTaskEvent(
          MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.REDUCE),
          TaskState.SUCCEEDED));
      Assert.assertEquals(JobState.RUNNING, job.getState());
    }
  }
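
A hedged usage sketch: after this helper drives every map and reduce to SUCCEEDED, a commit-path test would typically check that the job starts committing (assertJobState is an assumed helper, not part of the snippet above):

    completeJobTasks(job);
    // Once all tasks have reported SUCCEEDED, the job is expected to move
    // into its committing state before finishing.
    assertJobState(job, JobStateInternal.COMMITTING);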
