Package org.apache.hadoop.mapreduce.v2.app.job

Examples of org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt
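
TaskAttempt is the read-only view the MapReduce ApplicationMaster keeps for a single attempt of a map or reduce task; the snippets below exercise it from the AM test harness (waiting for states, sending TA_DONE events) and from TaskImpl itself (progress, counters, completion events). As a quick orientation, here is a minimal sketch that touches only the accessors used in those snippets (TaskAttemptSummary and describe are illustrative names, not part of Hadoop):

    import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;

    class TaskAttemptSummary {
      // Illustrative helper: render the fields the examples below inspect.
      static String describe(TaskAttempt attempt) {
        return "attempt=" + attempt.getID()
            + " state=" + attempt.getState()
            + " progress=" + attempt.getProgress()
            + " launch=" + attempt.getLaunchTime()
            + " finish=" + attempt.getFinishTime()
            + " shufflePort=" + attempt.getShufflePort();
      }
    }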


    Task reduceTask1 = it.next();
   
    // all maps must be running
    app.waitForState(mapTask1, TaskState.RUNNING);
   
    TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator()
        .next();
   
    //before sending the TA_DONE event, make sure the attempt has come to
    //the RUNNING state
    app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
 
    // write output corresponding to map1 (this is just to validate that it is
    // not included in the final output)
    writeBadOutput(task1Attempt1, conf);
   
    //send the done signal to the map
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            task1Attempt1.getID(),
            TaskAttemptEventType.TA_DONE));
   
    //wait for map task to complete
    app.waitForState(mapTask1, TaskState.SUCCEEDED);

    // Verify the shuffle-port
    Assert.assertEquals(5467, task1Attempt1.getShufflePort());

    //stop the app before the job completes.
    app.stop();
   
    //rerun
    //in the rerun, the map will be recovered from the previous run
    app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false,
        ++runCount);
    conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
    conf.setBoolean("mapred.mapper.new-api", true);
    conf.setBoolean("mapred.reducer.new-api", true);
    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
    job = app.submit(conf);
    app.waitForState(job, JobState.RUNNING);
    Assert.assertEquals("No of tasks not correct",
       3, job.getTasks().size());
    it = job.getTasks().values().iterator();
    mapTask1 = it.next();
    mapTask2 = it.next();
    reduceTask1 = it.next();
   
    // map will be recovered, no need to send done
    app.waitForState(mapTask1, TaskState.SUCCEEDED);

    // Verify the shuffle-port after recovery
    task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
    Assert.assertEquals(5467, task1Attempt1.getShufflePort());
   
    app.waitForState(mapTask2, TaskState.RUNNING);
   
    TaskAttempt task2Attempt1 = mapTask2.getAttempts().values().iterator()
        .next();

    //before sending the TA_DONE event, make sure the attempt has come to
    //the RUNNING state
    app.waitForState(task2Attempt1, TaskAttemptState.RUNNING);

    //send the done signal to the map
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            task2Attempt1.getID(),
            TaskAttemptEventType.TA_DONE));

    //wait for map task to complete
    app.waitForState(mapTask2, TaskState.SUCCEEDED);

    // Verify the shuffle-port
    Assert.assertEquals(5467, task2Attempt1.getShufflePort());
   
    app.waitForState(reduceTask1, TaskState.RUNNING);
    TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
   
    // write output corresponding to reduce1
    writeOutput(reduce1Attempt1, conf);
   
    //send the done signal to the 1st reduce
    app.getContext().getEventHandler().handle(
        new TaskAttemptEvent(
            reduce1Attempt1.getID(),
            TaskAttemptEventType.TA_DONE));

    //wait for first reduce task to complete
    app.waitForState(reduceTask1, TaskState.SUCCEEDED);
   
View Full Code Here
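
The recovery test above repeats the same wait-for-RUNNING / TA_DONE / wait-for-SUCCEEDED sequence for each map. A minimal helper sketch of that pattern, assuming the MRApp test harness used in the snippet (TaskDriverSketch and finishFirstAttempt are illustrative names, not Hadoop methods):

    import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
    import org.apache.hadoop.mapreduce.v2.app.MRApp;
    import org.apache.hadoop.mapreduce.v2.app.job.Task;
    import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
    import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
    import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;

    class TaskDriverSketch {
      // Drive the first attempt of a task to completion: wait until it is
      // RUNNING, send TA_DONE, then wait for the task to report SUCCEEDED.
      static void finishFirstAttempt(MRApp app, Task task) throws Exception {
        TaskAttempt attempt = task.getAttempts().values().iterator().next();
        app.waitForState(attempt, TaskAttemptState.RUNNING);
        app.getContext().getEventHandler().handle(
            new TaskAttemptEvent(attempt.getID(), TaskAttemptEventType.TA_DONE));
        app.waitForState(task, TaskState.SUCCEEDED);
      }
    }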


      taskID.setTaskType(type);
      taskID.setJobId(jobID);
    }

    void addAttempt() {
      TaskAttempt taskAttempt
          = new MyTaskAttemptImpl(taskID, attempts.size(), clock);
      TaskAttemptId taskAttemptID = taskAttempt.getID();

      attempts.put(taskAttemptID, taskAttempt);

      System.out.println("TLTRE.MyTaskImpl.addAttempt " + getID());
View Full Code Here

    app.waitForState(rTask, TaskState.RUNNING);
    Map<TaskAttemptId, TaskAttempt> mAttempts = mTask.getAttempts();
    Assert.assertEquals("Num attempts is not correct", 1, mAttempts.size());
    Map<TaskAttemptId, TaskAttempt> rAttempts = rTask.getAttempts();
    Assert.assertEquals("Num attempts is not correct", 1, rAttempts.size());
    TaskAttempt mta = mAttempts.values().iterator().next();
    TaskAttempt rta = rAttempts.values().iterator().next();
    app.waitForState(mta, TaskAttemptState.RUNNING);
    app.waitForState(rta, TaskAttemptState.RUNNING);

    clock.setTime(11);
    app.getContext()
        .getEventHandler()
        .handle(new TaskAttemptEvent(mta.getID(), TaskAttemptEventType.TA_DONE));
    app.getContext()
        .getEventHandler()
        .handle(new TaskAttemptEvent(rta.getID(), TaskAttemptEventType.TA_DONE));
    app.waitForState(job, JobState.SUCCEEDED);
    Assert.assertEquals(mta.getFinishTime(), 11);
    Assert.assertEquals(mta.getLaunchTime(), 10);
    Assert.assertEquals(rta.getFinishTime(), 11);
    Assert.assertEquals(rta.getLaunchTime(), 10);
    Assert.assertEquals((int) Math.ceil((float) mapMemMb / minContainerSize),
        job.getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_MAPS)
            .getValue());
    Assert.assertEquals(
        (int) Math.ceil((float) reduceMemMb / minContainerSize), job
View Full Code Here
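
The final assertions are consistent with slot-millis being computed as run time multiplied by ceil(memMb / minContainerSize): with the controlled clock each attempt runs for 11 - 10 = 1 ms, so the expected value reduces to the ceiling term alone. A hedged sketch of that arithmetic (SlotMillisSketch is illustrative, not part of Hadoop):

    class SlotMillisSketch {
      // Expected slot-millis: run time multiplied by the number of
      // minimum-size containers the attempt's memory request occupies.
      static long expectedSlotMillis(long runTimeMs, int memMb, int minContainerSizeMb) {
        return runTimeMs * (long) Math.ceil((float) memMb / minContainerSizeMb);
      }
    }

    // e.g. expectedSlotMillis(11 - 10, mapMemMb, minContainerSize)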

  @Override
  public Counters getCounters() {
    Counters counters = null;
    readLock.lock();
    try {
      TaskAttempt bestAttempt = selectBestAttempt();
      if (bestAttempt != null) {
        counters = bestAttempt.getCounters();
      } else {
        counters = TaskAttemptImpl.EMPTY_COUNTERS;
//        counters.groups = new HashMap<CharSequence, CounterGroup>();
      }
      return counters;
View Full Code Here

  @Override
  public float getProgress() {
    readLock.lock();
    try {
      TaskAttempt bestAttempt = selectBestAttempt();
      if (bestAttempt == null) {
        return 0f;
      }
      return bestAttempt.getProgress();
    } finally {
      readLock.unlock();
    }
  }
View Full Code Here

  // select the attempt with the best progress
  // always called inside the Read Lock
  private TaskAttempt selectBestAttempt() {
    float progress = 0f;
    TaskAttempt result = null;
    for (TaskAttempt at : attempts.values()) {
      switch (at.getState()) {
     
      // ignore all failed task attempts
      case FAILED:
View Full Code Here
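
The fragment above is cut off inside the switch. As an illustration of the idea (skip attempts that cannot contribute output and keep the one reporting the most progress), here is a standalone sketch; it is not the verbatim TaskImpl logic:

    import java.util.Collection;
    import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;

    class BestAttemptSketch {
      // Illustration only: ignore FAILED/KILLED attempts and return the
      // attempt with the highest reported progress, or null if none qualify.
      static TaskAttempt selectBestAttempt(Collection<TaskAttempt> attempts) {
        float bestProgress = 0f;
        TaskAttempt result = null;
        for (TaskAttempt at : attempts) {
          switch (at.getState()) {
            case FAILED:
            case KILLED:
              continue;
            default:
              break;
          }
          if (result == null || at.getProgress() > bestProgress) {
            result = at;
            bestProgress = at.getProgress();
          }
        }
        return result;
      }
    }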

    }
  }

  // This is always called in the Write Lock
  private void addAndScheduleAttempt() {
    TaskAttempt attempt = createAttempt();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Created attempt " + attempt.getID());
    }
    switch (attempts.size()) {
      case 0:
        attempts = Collections.singletonMap(attempt.getID(), attempt);
        break;
       
      case 1:
        Map<TaskAttemptId, TaskAttempt> newAttempts
            = new LinkedHashMap<TaskAttemptId, TaskAttempt>(maxAttempts);
        newAttempts.putAll(attempts);
        attempts = newAttempts;
        attempts.put(attempt.getID(), attempt);
        break;

      default:
        attempts.put(attempt.getID(), attempt);
        break;
    }

    // Update nextAttemptNumber
    if (taskAttemptsFromPreviousGeneration.isEmpty()) {
      ++nextAttemptNumber;
    } else {
      // There are still some TaskAttempts from previous generation, use them
      nextAttemptNumber =
          taskAttemptsFromPreviousGeneration.remove(0).getAttemptId().getId();
    }

    inProgressAttempts.add(attempt.getID());
    //schedule the nextAttemptNumber
    if (failedAttempts.size() > 0) {
      eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
        TaskAttemptEventType.TA_RESCHEDULE));
    } else {
      eventHandler.handle(new TaskAttemptEvent(attempt.getID(),
          TaskAttemptEventType.TA_SCHEDULE));
    }
  }
View Full Code Here
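
A note on the switch over attempts.size() above: most tasks only ever get a single attempt, so the map starts as an immutable Collections.singletonMap and is only promoted to a LinkedHashMap sized at maxAttempts when a second attempt appears, presumably to keep the per-task footprint small in the common case. Also note the scheduling split at the end: when failedAttempts is non-empty the new attempt is sent TA_RESCHEDULE rather than TA_SCHEDULE, since it replaces a failed attempt.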

  }

  // always called inside a transition, in turn inside the Write Lock
  private void handleTaskAttemptCompletion(TaskAttemptId attemptId,
      TaskAttemptCompletionEventStatus status) {
    TaskAttempt attempt = attempts.get(attemptId);
    // raise the completion event only if a container was assigned
    // to this attempt
    if (attempt.getNodeHttpAddress() != null) {
      TaskAttemptCompletionEvent tce = recordFactory
          .newRecordInstance(TaskAttemptCompletionEvent.class);
      tce.setEventId(-1);
      tce.setMapOutputServerAddress(StringInterner.weakIntern("http://"
          + attempt.getNodeHttpAddress().split(":")[0] + ":"
          + attempt.getShufflePort()));
      tce.setStatus(status);
      tce.setAttemptId(attempt.getID());
      int runTime = 0;
      if (attempt.getFinishTime() != 0 && attempt.getLaunchTime() != 0) {
        runTime = (int) (attempt.getFinishTime() - attempt.getLaunchTime());
      }
      tce.setAttemptRunTime(runTime);
     
      //raise the event to job so that it adds the completion event to its
      //data structures
      eventHandler.handle(new JobTaskAttemptCompletedEvent(tce));
View Full Code Here
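
Note how the map-output server address handed to reducers is built from the host portion of the attempt's node HTTP address plus attempt.getShufflePort(); this is the same shuffle port the recovery test earlier on this page asserts (5467) both before and after the map is recovered.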

      TaskAttemptId taskAttemptId = castEvent.getTaskAttemptID();
      task.failedAttempts.add(taskAttemptId);
      if (taskAttemptId.equals(task.commitAttempt)) {
        task.commitAttempt = null;
      }
      TaskAttempt attempt = task.attempts.get(taskAttemptId);
      if (attempt.getAssignedContainerMgrAddress() != null) {
        //container was assigned
        task.eventHandler.handle(new ContainerFailedEvent(attempt.getID(),
            attempt.getAssignedContainerMgrAddress()));
      }
     
      task.finishedAttempts.add(taskAttemptId);
      if (task.failedAttempts.size() < task.maxAttempts) {
        task.handleTaskAttemptCompletion(
            taskAttemptId,
            TaskAttemptCompletionEventStatus.FAILED);
        // we don't need a new event if we already have a spare
        task.inProgressAttempts.remove(taskAttemptId);
        if (task.inProgressAttempts.size() == 0
            && task.successfulAttempt == null) {
          task.addAndScheduleAttempt();
        }
      } else {
        task.handleTaskAttemptCompletion(
            taskAttemptId,
            TaskAttemptCompletionEventStatus.TIPFAILED);

        // issue kill to all unfinished attempts
        for (TaskAttempt taskAttempt : task.attempts.values()) {
          task.killUnfinishedAttempt(taskAttempt,
              "Task has failed. Killing attempt!");
        }
        task.inProgressAttempts.clear();
       
        if (task.historyTaskStartGenerated) {
          TaskFailedEvent taskFailedEvent = createTaskFailedEvent(task,
              attempt.getDiagnostics(), TaskStateInternal.FAILED, taskAttemptId);
          task.eventHandler.handle(new JobHistoryEvent(task.taskId.getJobId(),
              taskFailedEvent));
        } else {
          LOG.debug("Not generating HistoryFinish event since start event not" +
View Full Code Here

      return task;
    }

    private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID,
        boolean modifyAccess) throws YarnRemoteException {
      TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(),
          modifyAccess).getAttempt(attemptID);
      if (attempt == null) {
        throw RPCUtil.getRemoteException("Unknown TaskAttempt " + attemptID);
      }
      return attempt;
View Full Code Here

