Package org.apache.hadoop.mapreduce.v2.api.records

Examples of org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId
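
A minimal, self-contained sketch of how a TaskAttemptId is typically built, using the same MRBuilderUtils helpers that appear in the excerpts below (the timestamp and numeric ids are arbitrary):

    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
    import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

    public class TaskAttemptIdSketch {
      public static void main(String[] args) {
        // a JobId is a cluster timestamp plus an application id and job number
        JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1);
        // a TaskId is the parent job, a task index, and MAP or REDUCE
        TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
        // a TaskAttemptId is the parent task plus an attempt number
        TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);

        // the record nests attempt -> task -> job, which the excerpts below
        // exploit via attemptId.getTaskId().getJobId()
        System.out.println(attemptId + " for task " + attemptId.getTaskId());
      }
    }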


    // From a container-launcher test: builds a ContainerId and a MAP-task
    // TaskAttemptId, then stubs a StartContainersResponse for a fake
    // container manager. "ut" (the launcher under test), "makeContainerId",
    // "makeTaskAttemptId", "recordFactory" and "serviceResponse" come from
    // the enclosing test; the excerpt is truncated inside the try block.
    Configuration conf = new Configuration();
    ut.init(conf);
    ut.start();
    try {
      ContainerId contId = makeContainerId(0L, 0, 0, 1);
      TaskAttemptId taskAttemptId = makeTaskAttemptId(0L, 0, 0, TaskType.MAP, 0);
      String cmAddress = "127.0.0.1:8000";
      StartContainersResponse startResp =
        recordFactory.newRecordInstance(StartContainersResponse.class);
      startResp.setAllServicesMetaData(serviceResponse);

      // From the MR app master's reduce-ramp-down path: running reduce
      // attempts are sorted by progress and the least-advanced ones are
      // preempted. The head of the comparator is reconstructed here, since
      // the excerpt begins mid-expression.
      Collections.sort(reduceList, new Comparator<TaskAttemptId>() {
        @Override
        public int compare(TaskAttemptId o1, TaskAttemptId o2) {
          return Float.compare(
              getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress(),
              getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress());
        }
      });

      for (int i = 0; i < toPreempt && reduceList.size() > 0; i++) {
        TaskAttemptId id = reduceList.remove(0); // remove the one on top
        LOG.info("Preempting " + id);
        preemptionWaitingReduces.add(id);
        eventHandler.handle(new TaskAttemptKillEvent(id, RAMPDOWN_DIAGNOSTIC));
      }
    }

        // From the local (uber) container launcher's event loop:
        } else if (event.getType() == EventType.CONTAINER_REMOTE_CLEANUP) {

          // cancel (and interrupt) the current running task associated
          // with the event
          TaskAttemptId taId = event.getTaskAttemptID();
          Future<?> future = futures.remove(taId);
          if (future != null) {
            LOG.info("canceling the task attempt " + taId);
            future.cancel(true);
          }

    }

    @SuppressWarnings("unchecked")
    private void runTask(ContainerRemoteLaunchEvent launchEv,
        Map<TaskAttemptID, MapOutputFile> localMapFiles) {
      TaskAttemptId attemptID = launchEv.getTaskAttemptID();

      Job job = context.getAllJobs().get(attemptID.getTaskId().getJobId());
      int numMapTasks = job.getTotalMaps();
      int numReduceTasks = job.getTotalReduces();

      // YARN (tracking) Task:
      org.apache.hadoop.mapreduce.v2.app.job.Task ytask =
          job.getTask(attemptID.getTaskId());
      // classic mapred Task:
      org.apache.hadoop.mapred.Task remoteTask = launchEv.getRemoteTask();

      // after "launching," send launched event to task attempt to move
      // state from ASSIGNED to RUNNING (also nukes "remoteTask", so must
      // do getRemoteTask() call first)
     
      // There is no port number because we are not really talking to a task
      // tracker.  The shuffle is just done through local files.  So the
      // port number is set to -1 in this case.
      context.getEventHandler().handle(
          new TaskAttemptContainerLaunchedEvent(attemptID, -1));

      if (numMapTasks == 0) {
        doneWithMaps = true;
      }

      try {
        if (remoteTask.isMapOrReduce()) {
          JobCounterUpdateEvent jce = new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
          jce.addCounterUpdate(JobCounter.TOTAL_LAUNCHED_UBERTASKS, 1);
          if (remoteTask.isMapTask()) {
            jce.addCounterUpdate(JobCounter.NUM_UBER_SUBMAPS, 1);
          } else {
            jce.addCounterUpdate(JobCounter.NUM_UBER_SUBREDUCES, 1);
          }
          context.getEventHandler().handle(jce);
        }
        runSubtask(remoteTask, ytask.getType(), attemptID, numMapTasks,
                   (numReduceTasks > 0), localMapFiles);
       
      } catch (RuntimeException re) {
        JobCounterUpdateEvent jce =
            new JobCounterUpdateEvent(attemptID.getTaskId().getJobId());
        jce.addCounterUpdate(JobCounter.NUM_FAILED_UBERTASKS, 1);
        context.getEventHandler().handle(jce);
        // this is our signal that the subtask failed in some way, so
        // simulate a failed JVM/container and send a container-completed
        // event to the task attempt (i.e., move the state machine from
        // RUNNING to FAIL_CONTAINER_CLEANUP)
        context.getEventHandler().handle(new TaskAttemptEvent(attemptID,
            TaskAttemptEventType.TA_CONTAINER_COMPLETED));
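
Note that runTask handles both id flavors: the classic org.apache.hadoop.mapred TaskAttemptID (the key type of localMapFiles) and the YARN-side TaskAttemptId record. A minimal sketch of converting between them with org.apache.hadoop.mapreduce.TypeConverter, using arbitrary ids:

    import org.apache.hadoop.mapred.TaskAttemptID;
    import org.apache.hadoop.mapreduce.TypeConverter;
    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
    import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;

    public class TaskAttemptIdConversionSketch {
      public static void main(String[] args) {
        JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 1);
        TaskId taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
        TaskAttemptId yarnId = MRBuilderUtils.newTaskAttemptId(taskId, 0);

        // YARN record -> classic mapred id ...
        TaskAttemptID classicId = TypeConverter.fromYarn(yarnId);
        // ... and back again
        TaskAttemptId roundTrip = TypeConverter.toYarn(classicId);
        System.out.println(classicId + " <-> " + roundTrip);
      }
    }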

    // create mocked job, task, and task attempt
    // a single-mapper job
    JobId jobId = MRBuilderUtils.newJobId(System.currentTimeMillis(), 1, 1);
    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
    TaskAttemptId taId = MRBuilderUtils.newTaskAttemptId(taskId, 0);

    Job job = mock(Job.class);
    when(job.getTotalMaps()).thenReturn(1);
    when(job.getTotalReduces()).thenReturn(0);
    Map<JobId,Job> jobs = new HashMap<JobId,Job>();

    // From a test of LocalContainerLauncher.renameMapOutputForReduce: empty
    // map-output and index files are created first. "lfc" (a FileContext),
    // "mapOut", "mapOutIdx", "conf" and "mrOutputFiles" come from the
    // enclosing test.
    lfc.create(mapOut, EnumSet.of(CREATE)).close();
    lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();

    final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
    final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);

    LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
  }

    } else if (
        event.getType() == ContainerAllocator.EventType.CONTAINER_DEALLOCATE) {

      LOG.info("Processing the event " + event.toString());

      TaskAttemptId aId = event.getAttemptID();

      // if the attempt was still waiting for a container, dropping the
      // pending request is enough; otherwise find the container it holds
      boolean removed = scheduledRequests.remove(aId);
      if (!removed) {
        ContainerId containerId = assignedRequests.get(aId);
        if (containerId != null) {

    // From the allocator's heartbeat handling: each completed container is
    // mapped back to the task attempt that was running in it
    handleUpdatedNodes(response);

    for (ContainerStatus cont : finishedContainers) {
      LOG.info("Received completed container " + cont.getContainerId());
      TaskAttemptId attemptID = assignedRequests.get(cont.getContainerId());
      if (attemptID == null) {
        LOG.error("Container complete event for unknown container id "
            + cont.getContainerId());
      } else {
        assignedRequests.remove(attemptID);

      // pass 0 inspects running map attempts, pass 1 running reduce attempts
      for (int i = 0; i < 2; ++i) {
        HashMap<TaskAttemptId, Container> taskSet =
            i == 0 ? assignedRequests.maps : assignedRequests.reduces;
        // kill containers running on nodes that have become unusable
        for (Map.Entry<TaskAttemptId, Container> entry : taskSet.entrySet()) {
          TaskAttemptId tid = entry.getKey();
          NodeId taskAttemptNodeId = entry.getValue().getNodeId();
          if (unusableNodes.contains(taskAttemptNodeId)) {
            LOG.info("Killing taskAttempt:" + tid
                + " because it is running on unusable node:"
                + taskAttemptNodeId);