Package org.apache.hadoop.mapreduce.v2.app.job.event

Examples of org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent

JobDiagnosticsUpdateEvent pairs a JobId with a human-readable diagnostic string that is appended to the job's diagnostics.

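All of the call sites below follow the same two-step pattern: first emit a JobDiagnosticsUpdateEvent carrying the reason, then emit the JobEvent that actually changes the job's state, so the diagnostic is already attached when the job transitions. A minimal sketch of that pattern (the helper method and its name are illustrative, not taken from the snippets):

    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
    import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
    import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
    import org.apache.hadoop.yarn.event.EventHandler;

    // Illustrative helper: record the human-readable reason first, then
    // fire the state-changing event.
    void failJob(EventHandler<JobEvent> eventHandler, JobId jobId,
        String reason) {
      eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, reason));
      eventHandler.handle(new JobEvent(jobId, JobEventType.INTERNAL_ERROR));
    }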

          // AM recovery path (likely the recovery service): replay a
          // completed attempt by asking the OutputCommitter to recover its
          // output; if that fails, report the cause and abort the job.
          TaskAttemptContext taskContext = new TaskAttemptContextImpl(getConfig(),
              attInfo.getAttemptId());
          try {
            committer.recoverTask(taskContext);
          } catch (IOException e) {
            actualHandler.handle(new JobDiagnosticsUpdateEvent(
                aId.getTaskId().getJobId(), "Error in recovering task output " +
                e.getMessage()));
            actualHandler.handle(new JobEvent(aId.getTaskId().getJobId(),
                JobEventType.INTERNAL_ERROR));
          }


      // State-machine dispatch (TaskAttemptImpl, by the field names): an
      // event that is invalid in the current state becomes a job diagnostic
      // plus INTERNAL_ERROR. Note that InvalidStateTransitonException is the
      // actual (misspelled) class name in org.apache.hadoop.yarn.state.
      try {
        stateMachine.doTransition(event.getType(), event);
      } catch (InvalidStateTransitonException e) {
        LOG.error("Can't handle this event at current state for "
            + this.attemptId, e);
        eventHandler.handle(new JobDiagnosticsUpdateEvent(
            this.attemptId.getTaskId().getJobId(), "Invalid event " + event.getType() +
            " on TaskAttempt " + this.attemptId));
        eventHandler.handle(new JobEvent(this.attemptId.getTaskId().getJobId(),
            JobEventType.INTERNAL_ERROR));
      }

  // TaskImpl-style handler (by the field names): an invalid TaskEventType
  // is logged, recorded as a job diagnostic, and escalated to INTERNAL_ERROR.
  protected void internalError(TaskEventType type) {
    LOG.error("Invalid event " + type + " on Task " + this.taskId);
    eventHandler.handle(new JobDiagnosticsUpdateEvent(
        this.taskId.getJobId(), "Invalid event " + type +
        " on Task " + this.taskId));
    eventHandler.handle(new JobEvent(this.taskId.getJobId(),
        JobEventType.INTERNAL_ERROR));
  }
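On the consuming side, the job's state machine appends the event's message to its diagnostics list, which is what the job's diagnostics later report. A minimal consumer sketch, assuming the event exposes a getDiagnosticUpdate() getter (the handler method itself is illustrative):

    // Illustrative consumer: collect the diagnostic for later reporting.
    void onDiagnosticUpdate(JobDiagnosticsUpdateEvent ev,
        java.util.List<String> diagnostics) {
      String update = ev.getDiagnosticUpdate();
      if (update != null) {
        diagnostics.add(update);  // surfaced via the job's diagnostics
      }
    }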

              LOG.info("Will not try to recover output for "
                  + taskContext.getTaskAttemptID());
            }
          } catch (IOException e) {
            LOG.error("Caught an exception while trying to recover task "+aId, e);
            actualHandler.handle(new JobDiagnosticsUpdateEvent(
                aId.getTaskId().getJobId(), "Error in recovering task output " +
                e.getMessage()));
            actualHandler.handle(new JobEvent(aId.getTaskId().getJobId(),
                JobEventType.INTERNAL_ERROR));
          }

          // Resource-allocator fragment (RMContainerAllocator-style): the
          // memory request is rounded up to a whole number of slots; if the
          // rounded requirement exceeds the cluster's max container
          // capability, the job is killed with a diagnostic rather than
          // waiting forever for a container that can never be granted.
          if (mapResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "MAP capability required is more than the supported " +
                "max container capability in the cluster. Killing the Job. mapResourceReqt: " +
                mapResourceReqt + " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        // set the rounded-off memory
        reqEvent.getCapability().setMemory(mapResourceReqt);
        scheduledRequests.addMap(reqEvent); // maps are scheduled immediately
      } else {
        if (reduceResourceReqt == 0) {
          reduceResourceReqt = reqEvent.getCapability().getMemory();
          int minSlotMemSize = getMinContainerCapability().getMemory();
          // round up to the next multiple of the slot size
          reduceResourceReqt = (int) Math.ceil(
              (float) reduceResourceReqt / minSlotMemSize) * minSlotMemSize;
          eventHandler.handle(new JobHistoryEvent(jobId,
              new NormalizedResourceEvent(
                  org.apache.hadoop.mapreduce.TaskType.REDUCE,
                  reduceResourceReqt)));
          LOG.info("reduceResourceReqt:" + reduceResourceReqt);
          if (reduceResourceReqt > supportedMaxContainerCapability) {
            String diagMsg = "REDUCE capability required is more than the " +
                "supported max container capability in the cluster. Killing the " +
                "Job. reduceResourceReqt: " + reduceResourceReqt +
                " maxContainerCapability:" + supportedMaxContainerCapability;
            LOG.info(diagMsg);
            eventHandler.handle(new JobDiagnosticsUpdateEvent(jobId, diagMsg));
            eventHandler.handle(new JobEvent(jobId, JobEventType.JOB_KILL));
          }
        }
        // set the rounded-off memory
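
The rounding above normalizes every memory request to a whole number of scheduler slots. A stand-alone illustration of the arithmetic (slot and request sizes are made up):

    // Round a memory request up to the next multiple of the slot size,
    // mirroring the expression used in the fragment above.
    public class SlotRounding {
      static int roundToSlot(int requestMb, int slotMb) {
        return (int) Math.ceil((float) requestMb / slotMb) * slotMb;
      }
      public static void main(String[] args) {
        System.out.println(roundToSlot(1500, 1024)); // 2048
        System.out.println(roundToSlot(2048, 1024)); // 2048
      }
    }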
