Package org.apache.hadoop.mapreduce.v2.app

Examples of org.apache.hadoop.mapreduce.v2.app.TestMRApp and of the related job and task-attempt implementation classes (JobImpl, TaskAttemptImpl) that these tests exercise.


From JobImpl.getAllCounters(), which short-circuits to the cached final counters once the job has reached a terminal internal state:

  public Counters getAllCounters() {
    readLock.lock();
    try {
      JobStateInternal state = getInternalState();
      if (state == JobStateInternal.ERROR || state == JobStateInternal.FAILED
          || state == JobStateInternal.KILLED || state == JobStateInternal.SUCCEEDED) {
        this.mayBeConstructFinalFullCounters();
        return fullCounters;
      }
      // ... (remainder of the method truncated in the original listing)
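For context, a minimal MRApp-based test that ends up on this code path might look roughly like the sketch below. It is not taken from TestMRApp itself: the class name, test-name string, and the particular JobCounter checked are illustrative, while MRApp, submit(), waitForState() and Job.getAllCounters() are the same APIs the snippets on this page use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.app.MRApp;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.junit.Assert;
import org.junit.Test;

public class MRAppCountersSketch {
  @Test
  public void testCountersAfterSuccess() throws Exception {
    // 2 maps, 1 reduce, auto-complete attempts, clean the staging dir on start.
    MRApp app = new MRApp(2, 1, true, "MRAppCountersSketch", true);
    Job job = app.submit(new Configuration());
    app.waitForState(job, JobState.SUCCEEDED);

    // Once the job is in a terminal state, getAllCounters() above returns the
    // cached fullCounters instead of re-aggregating per-task counters.
    Counters counters = job.getAllCounters();
    Assert.assertNotNull("Counters should be available after completion", counters);
    // findCounter creates the counter if absent; with MRApp's mocked attempts
    // its value may simply be zero, so no specific value is asserted here.
    Assert.assertNotNull(counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS));
  }
}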


      LOG.debug("Processing " + event.getJobId() + " of type "
          + event.getType());
    }
    try {
      writeLock.lock();
      JobStateInternal oldState = getInternalState();
      try {
         getStateMachine().doTransition(event.getType(), event);
      } catch (InvalidStateTransitonException e) {
        LOG.error("Can't handle this event at current state", e);
        addDiagnostic("Invalid event " + event.getType() +
View Full Code Here

Also from JobImpl: a protected FileSystem factory hook (kept overridable for tests) and the check that moves a fully completed, still-RUNNING job into the commit phase by sending a CommitterJobCommitEvent:

  protected FileSystem getFileSystem(Configuration conf) throws IOException {
    return FileSystem.get(conf);
  }

  protected JobStateInternal checkReadyForCommit() {
    JobStateInternal currentState = getInternalState();
    if (completedTaskCount == tasks.size()
        && currentState == JobStateInternal.RUNNING) {
      eventHandler.handle(new CommitterJobCommitEvent(jobId, getJobContext()));
      return JobStateInternal.COMMITTING;
    }
    // ... (remainder of the method truncated in the original listing)

From an MRApp-based test that drives a job to failure (the listing starts mid-method): it submits the job, waits for JobState.FAILED, and then verifies that the single task failed and that it was retried maxAttempts times:

    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.FAILED);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    Assert.assertEquals("Task state not correct", TaskState.FAILED,
        task.getReport().getTaskState());
    Map<TaskAttemptId, TaskAttempt> attempts =
        tasks.values().iterator().next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts,
        attempts.size());
    for (TaskAttempt attempt : attempts.values()) {
      // ... (loop body truncated in the original listing)
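The conf and maxAttempts variables above are set up earlier in the test, outside the excerpt. A hedged sketch of that setup is below; the helper class and method names are made up, and the assumption that the retry cap comes from MRJobConfig.MAP_MAX_ATTEMPTS is mine (the excerpt only shows the uber-task flag being cleared).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public final class FailingTaskTestConf {
  private FailingTaskTestConf() {}

  /** Builds the kind of Configuration a test like the one above submits. */
  public static Configuration create(int maxAttempts) {
    Configuration conf = new Configuration();
    // Assumed: cap map retries so the assertion on attempts.size() has a known bound.
    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
    // Disable uberization so the task runs as a normal (non-uber) attempt,
    // matching the JOB_UBERTASK_ENABLE=false line in the snippet.
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    return conf;
  }
}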

A similar test fragment taken earlier in the lifecycle: the job is still RUNNING, the single task has reached SCHEDULED, and the number of attempts is compared against maxAttempts:

    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Map<TaskId, Task> tasks = job.getTasks();
    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
    Task task = tasks.values().iterator().next();
    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
        .next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts,
        attempts.size());
    // ... (remainder of the test truncated in the original listing)

From JobImpl's TaskCompletedTransition, which tallies finished tasks and branches on whether the completed task succeeded, failed, or was killed:

    @Override
    public JobStateInternal transition(JobImpl job, JobEvent event) {
      job.completedTaskCount++;
      LOG.info("Num completed Tasks: " + job.completedTaskCount);
      JobTaskEvent taskEvent = (JobTaskEvent) event;
      Task task = job.tasks.get(taskEvent.getTaskID());
      if (taskEvent.getState() == TaskState.SUCCEEDED) {
        taskSucceeded(job, task);
      } else if (taskEvent.getState() == TaskState.FAILED) {
        taskFailed(job, task);
      } else if (taskEvent.getState() == TaskState.KILLED) {
        // ... (remainder of the method truncated in the original listing)

Another test fragment (starting mid-method): it waits for the attempt to reach the internal ASSIGNED state, injects a TA_CONTAINER_COMPLETED event through the app's dispatcher, and then expects the whole job to fail:

    app.waitForState(task, TaskState.SCHEDULED);
    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
        .next().getAttempts();
    Assert.assertEquals("Num attempts is not correct", maxAttempts,
        attempts.size());
    TaskAttempt attempt = attempts.values().iterator().next();
    app.waitForInternalState((TaskAttemptImpl) attempt,
        TaskAttemptStateInternal.ASSIGNED);
    app.getDispatcher().getEventHandler().handle(
        new TaskAttemptEvent(attempt.getID(),
            TaskAttemptEventType.TA_CONTAINER_COMPLETED));
    app.waitForState(job, JobState.FAILED);
  }

      LOG.debug("Processing " + event.getTaskAttemptID() + " of type "
          + event.getType());
    }
    writeLock.lock();
    try {
      final TaskAttemptStateInternal oldState = getInternalState()  ;
      try {
        stateMachine.doTransition(event.getType(), event);
      } catch (InvalidStateTransitonException e) {
        LOG.error("Can't handle this event at current state for "
            + this.attemptId, e);
View Full Code Here

From TaskAttemptImpl's recovery path (run when the MR application master restarts), which maps the recovered TaskAttemptState back onto an internal attempt state; the listing starts mid-method:

        recoveredState = TaskAttemptState.KILLED.toString();
        needToClean = true;
      }
    }

    TaskAttemptStateInternal attemptState;
    if (TaskAttemptState.SUCCEEDED.toString().equals(recoveredState)) {
      attemptState = TaskAttemptStateInternal.SUCCEEDED;
      reportedStatus.taskState = TaskAttemptState.SUCCEEDED;
      eventHandler.handle(createJobCounterUpdateEventTASucceeded(this));
      logAttemptFinishedEvent(attemptState);
      // ... (remainder of the method truncated in the original listing)

And from JobImpl again, the SingleArcTransition taken when the committer reports a failed job commit: it records a diagnostic and asks the committer to abort the job as FAILED:

  private static class CommitFailedTransition implements
      SingleArcTransition<JobImpl, JobEvent> {
    @Override
    public void transition(JobImpl job, JobEvent event) {
      JobCommitFailedEvent jcfe = (JobCommitFailedEvent) event;
      job.addDiagnostic("Job commit failed: " + jcfe.getMessage());
      job.eventHandler.handle(new CommitterJobAbortEvent(job.jobId,
          job.jobContext,
          org.apache.hadoop.mapreduce.JobStatus.State.FAILED));
    }
  }
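CommitFailedTransition, like TaskCompletedTransition above, is a hook registered with JobImpl's StateMachineFactory. As a hedged, self-contained illustration of how that wiring works in general, the sketch below uses an entirely made-up Light operand; only the org.apache.hadoop.yarn.state types and method signatures are the real API. A MultipleArcTransition such as TaskCompletedTransition is registered in the same way, except the arc declares a set of possible post-states and the hook returns the one actually taken.

import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;

// Hypothetical operand type: a light that can be flipped from OFF to ON.
public class Light {
  public enum LightState { OFF, ON }
  public enum LightEventType { FLIP }
  public static class LightEvent { /* event payload would live here */ }

  // Hook executed while the OFF -> ON arc is taken, analogous to
  // CommitFailedTransition adding a diagnostic and emitting an abort event.
  private static class TurnOnTransition
      implements SingleArcTransition<Light, LightEvent> {
    @Override
    public void transition(Light light, LightEvent event) {
      light.turnedOn = true; // side effect of taking the transition
    }
  }

  // One arc: OFF --FLIP--> ON, running TurnOnTransition on the way.
  private static final StateMachineFactory<Light, LightState, LightEventType, LightEvent>
      stateMachineFactory =
          new StateMachineFactory<Light, LightState, LightEventType, LightEvent>(LightState.OFF)
              .addTransition(LightState.OFF, LightState.ON,
                  LightEventType.FLIP, new TurnOnTransition())
              .installTopology();

  private boolean turnedOn = false;
  private final StateMachine<LightState, LightEventType, LightEvent> stateMachine =
      stateMachineFactory.make(this);

  public LightState flip() {
    // Throws InvalidStateTransitonException (sic) if FLIP is not legal in the
    // current state, just as in the handle() snippets above.
    return stateMachine.doTransition(LightEventType.FLIP, new LightEvent());
  }
}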
