Package org.apache.hadoop.mapreduce.v2.app.job.event

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent
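
A TaskEvent pairs a TaskId with a TaskEventType (for example T_SCHEDULE, T_KILL, or T_ADD_SPEC_ATTEMPT) and is delivered to the task's state machine through an EventHandler, as every example below does. The sketch that follows is only an illustration of that pattern: the KillHelper class and its method names are hypothetical, while the TaskEvent constructor and the EventHandler.handle() call match the snippets on this page.

import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical helper, not part of Hadoop; it only wraps the TaskEvent /
// EventHandler usage shown in the examples below.
public class KillHelper {
  private final EventHandler<TaskEvent> eventHandler;

  public KillHelper(EventHandler<TaskEvent> eventHandler) {
    this.eventHandler = eventHandler;
  }

  // Ask the task's state machine to schedule its first attempt.
  public void scheduleTask(TaskId taskId) {
    eventHandler.handle(new TaskEvent(taskId, TaskEventType.T_SCHEDULE));
  }

  // Ask the task's state machine to kill the task and any running attempts.
  public void killTask(TaskId taskId) {
    eventHandler.handle(new TaskEvent(taskId, TaskEventType.T_KILL));
  }
}

In the MRAppMaster these events are normally routed through the central AsyncDispatcher, so handle() just enqueues the event and the TaskImpl state machine consumes it asynchronously.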


    //Fail / finish all the tasks. This should land the JobImpl directly in the
    //FAIL_ABORT state
    for(Task t: job.tasks.values()) {
      TaskImpl task = (TaskImpl) t;
      task.handle(new TaskEvent(task.getID(), TaskEventType.T_SCHEDULE));
      for(TaskAttempt ta: task.getAttempts().values()) {
        task.handle(new TaskTAttemptEvent(ta.getID(),
          TaskEventType.T_ATTEMPT_FAILED));
      }
    }


  // Send a T_SCHEDULE event for each task so its state machine can start scheduling attempts.
  protected void scheduleTasks(Set<TaskId> taskIDs) {
    for (TaskId taskID : taskIDs) {
      eventHandler.handle(new TaskEvent(taskID,
          TaskEventType.T_SCHEDULE));
    }
  }

    @Override
    public void transition(JobImpl job, JobEvent event) {
      job.addDiagnostic("Job received Kill while in RUNNING state.");
      for (Task task : job.tasks.values()) {
        job.eventHandler.handle(
            new TaskEvent(task.getID(), TaskEventType.T_KILL));
      }
      job.metrics.endRunningJob(job);
    }

        boolean allDone = true;
        for (Task task : job.tasks.values()) {
          if(!task.isFinished()) {
            allDone = false;
            job.eventHandler.handle(
              new TaskEvent(task.getID(), TaskEventType.T_KILL));
          }
        }

        //If all tasks are already done, we should go directly to FAIL_ABORT
        if(allDone) {

  //Add attempt to a given Task.
  protected void addSpeculativeAttempt(TaskId taskID) {
    LOG.info("DefaultSpeculator.addSpeculativeAttempt -- we are speculating "
        + taskID);
    eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_ADD_SPEC_ATTEMPT));
    mayHaveSpeculated.add(taskID);
  }

      // Reuse the completed task from the previous AM run if we have its TaskInfo;
      // otherwise schedule the task from scratch.
      TaskInfo taskInfo = completedTasksFromPreviousRun.remove(taskID);
      if (taskInfo != null) {
        eventHandler.handle(new TaskRecoverEvent(taskID, taskInfo,
            committer, recoverTaskOutput));
      } else {
        eventHandler.handle(new TaskEvent(taskID, TaskEventType.T_SCHEDULE));
      }
    }
  }