Examples of JobHistoryEvent


Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

      // Build the history event for a job that ended unsuccessfully and hand it
      // to the event handler before marking the job finished.
      JobUnsuccessfulCompletionEvent unsuccessfulJobEvent =
          new JobUnsuccessfulCompletionEvent(oldJobId,
              finishTime,
              succeededMapTaskCount,
              succeededReduceTaskCount,
              finalState.toString());
      eventHandler.handle(new JobHistoryEvent(jobId,
          unsuccessfulJobEvent));
      finished(finalState);
  }
View Full Code Here
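
All of these examples follow the same basic pattern: construct a concrete HistoryEvent, wrap it in a JobHistoryEvent keyed by the job's v2 JobId, and hand it to the application master's event handler, which routes it to the JobHistoryEventHandler for persistence. The following is a minimal sketch of that pattern, not taken from any of the snippets; the injected EventHandler and JobId are assumed to come from the surrounding AM wiring.

import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.event.EventHandler;

class JobHistoryEventSketch {
  // Assumed collaborators: the AM dispatcher's handler and the running job's id.
  private final EventHandler<JobHistoryEvent> eventHandler;
  private final JobId jobId;

  JobHistoryEventSketch(EventHandler<JobHistoryEvent> eventHandler, JobId jobId) {
    this.eventHandler = eventHandler;
    this.jobId = jobId;
  }

  void recordUnsuccessfulCompletion(long finishTime, int finishedMaps,
      int finishedReduces, String finalState) {
    // History events take the old-style JobID, so convert from the v2 JobId first.
    JobUnsuccessfulCompletionEvent event = new JobUnsuccessfulCompletionEvent(
        TypeConverter.fromYarn(jobId), finishTime, finishedMaps,
        finishedReduces, finalState);
    // Wrap it and dispatch; the history service writes it to the job history file.
    eventHandler.handle(new JobHistoryEvent(jobId, event));
  }
}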

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

            // Trailing workflow arguments of the JobSubmittedEvent (jse) constructed just above.
            job.conf.get(MRJobConfig.WORKFLOW_ID, ""),
            job.conf.get(MRJobConfig.WORKFLOW_NAME, ""),
            job.conf.get(MRJobConfig.WORKFLOW_NODE_NAME, ""),
            getWorkflowAdjacencies(job.conf),
            job.conf.get(MRJobConfig.WORKFLOW_TAGS, ""));
        job.eventHandler.handle(new JobHistoryEvent(job.jobId, jse));
        //TODO JH Verify jobACLs, UserName via UGI?

        TaskSplitMetaInfo[] taskSplitMetaInfo = createSplits(job, job.jobId);
        job.numMapTasks = taskSplitMetaInfo.length;
        job.numReduceTasks = job.conf.getInt(MRJobConfig.NUM_REDUCES, 0);
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

      // Record job initialization in the history, followed by the submit/start times.
      JobInitedEvent jie =
        new JobInitedEvent(job.oldJobId,
             job.startTime,
             job.numMapTasks, job.numReduceTasks,
             job.getState().toString(),
             job.isUber());
      job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
      JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
          job.appSubmitTime, job.startTime);
      job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
      job.metrics.runningJob(job);

      job.eventHandler.handle(new CommitterJobSetupEvent(
              job.jobId, job.jobContext));
    }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

      job.setFinishTime();
      JobUnsuccessfulCompletionEvent failedEvent =
          new JobUnsuccessfulCompletionEvent(job.oldJobId,
              job.finishTime, 0, 0,
              JobStateInternal.KILLED.toString());
      job.eventHandler.handle(new JobHistoryEvent(job.jobId, failedEvent));
      job.finished(JobStateInternal.KILLED);
    }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

      int supportedMaxContainerCapability =
          getMaxContainerCapability().getMemory();
      if (reqEvent.getAttemptID().getTaskId().getTaskType().equals(TaskType.MAP)) {
        if (mapResourceRequest == 0) {
          mapResourceRequest = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
                  mapResourceRequest)));
          LOG.info("mapResourceRequest:"+ mapResourceRequest);
          if (mapResourceRequest > supportedMaxContainerCapability) {
            String diagMsg = "MAP capability required is more than the supported " +
            "max container capability in the cluster. Killing the Job. mapResourceRequest: " +
                mapResourceRequest + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(mapResourceRequest);
        scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
      } else {
        if (reduceResourceRequest == 0) {
          reduceResourceRequest = reqEvent.getCapability().getMemory();
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
                  reduceResourceRequest)));
          LOG.info("reduceResourceRequest:"+ reduceResourceRequest);
          if (reduceResourceRequest > supportedMaxContainerCapability) {
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

    // End of creating the job.

    // Send out an MR AM inited event for all previous AMs.
    for (AMInfo info : amInfos) {
      dispatcher.getEventHandler().handle(
          new JobHistoryEvent(job.getID(), new AMStartedEvent(info
              .getAppAttemptId(), info.getStartTime(), info.getContainerId(),
              info.getNodeManagerHost(), info.getNodeManagerPort(), info
                  .getNodeManagerHttpPort())));
    }

    // Send out an MR AM inited event for this AM.
    dispatcher.getEventHandler().handle(
        new JobHistoryEvent(job.getID(), new AMStartedEvent(amInfo
            .getAppAttemptId(), amInfo.getStartTime(), amInfo.getContainerId(),
            amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort(), amInfo
                .getNodeManagerHttpPort(), this.forcedState == null ? null
                    : this.forcedState.toString())));
    amInfos.add(amInfo);
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

    while (ie.hasNext()) {
      Object current = ie.next();
      ++eventNum;
      LOG.info(eventNum + " " + current.getClass().getName());
      if (current instanceof JobHistoryEvent) {
        JobHistoryEvent jhe = (JobHistoryEvent) current;
        LOG.info(expectedJobHistoryEvents.get(0).toString() + " " +
            jhe.getHistoryEvent().getEventType().toString() + " " +
            jhe.getJobID());
        assertEquals(expectedJobHistoryEvents.get(0),
            jhe.getHistoryEvent().getEventType());
        expectedJobHistoryEvents.remove(0);
      } else if (current instanceof JobCounterUpdateEvent) {
        JobCounterUpdateEvent jcue = (JobCounterUpdateEvent) current;

        LOG.info("JobCounterUpdateEvent "
View Full Code Here
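
The test above iterates over captured events and checks that the JobHistoryEvent types arrive in the expected order. A hypothetical recording handler illustrating the same idea, using only the public EventHandler interface (the test harness around it is assumed):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.jobhistory.EventType;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.yarn.event.EventHandler;

// Hypothetical helper: records the type of every JobHistoryEvent it receives
// so a test can assert on the sequence afterwards.
class RecordingHistoryHandler implements EventHandler<JobHistoryEvent> {
  final List<EventType> seenTypes = new ArrayList<EventType>();

  @Override
  public void handle(JobHistoryEvent event) {
    seenTypes.add(event.getHistoryEvent().getEventType());
  }
}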

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

        if (mapResourceReqt == 0) {
          mapResourceReqt = reqEvent.getCapability().getMemory();
          int minSlotMemSize = getMinContainerCapability().getMemory();
          mapResourceReqt = (int) Math.ceil((float) mapResourceReqt/minSlotMemSize)
              * minSlotMemSize;
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
              mapResourceReqt)));
          LOG.info("mapResourceReqt:"+mapResourceReqt);
          if (mapResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "MAP capability required is more than the supported " +
            "max container capability in the cluster. Killing the Job. mapResourceReqt: " +
            mapResourceReqt + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(
                jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        //set the rounded off memory
        reqEvent.getCapability().setMemory(mapResourceReqt);
        scheduledRequests.addMap(reqEvent);//maps are immediately scheduled
      } else {
        if (reduceResourceReqt == 0) {
          reduceResourceReqt = reqEvent.getCapability().getMemory();
          int minSlotMemSize = getMinContainerCapability().getMemory();
          //round off on slotsize
          reduceResourceReqt = (int) Math.ceil((float)
              reduceResourceReqt/minSlotMemSize) * minSlotMemSize;
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
              reduceResourceReqt)));
          LOG.info("reduceResourceReqt:"+reduceResourceReqt);
          if (reduceResourceReqt > supportedMaxContainerCapability) {
View Full Code Here
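
Unlike the newer allocator shown earlier, this older version first rounds the requested memory up to a multiple of the minimum container (slot) size before emitting the NormalizedResourceEvent. The rounding step in isolation, with purely illustrative numbers:

final class SlotRounding {
  // Round a memory request (MB) up to the nearest multiple of the minimum slot size,
  // mirroring the Math.ceil arithmetic in the snippet above.
  static int roundUpToSlotSize(int requestedMb, int minSlotMb) {
    return (int) Math.ceil((float) requestedMb / minSlotMb) * minSlotMb;
  }

  public static void main(String[] args) {
    System.out.println(roundUpToSlotSize(1500, 1024)); // prints 2048
    System.out.println(roundUpToSlotSize(1024, 1024)); // prints 1024
  }
}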

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

  void logJobHistoryFinishedEvent() {
    this.setFinishTime();
    JobFinishedEvent jfe = createJobFinishedEvent(this);
    LOG.info("Calling handler for JobFinishedEvent ");
    this.getEventHandler().handle(new JobHistoryEvent(this.jobId, jfe));   
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent

    JobUnsuccessfulCompletionEvent unsuccessfulJobEvent =
      new JobUnsuccessfulCompletionEvent(oldJobId,
          finishTime,
          succeededMapTaskCount,
          succeededReduceTaskCount,
          finalState.toString());
    eventHandler.handle(new JobHistoryEvent(jobId, unsuccessfulJobEvent));
  }
View Full Code Here