Package org.apache.hadoop.tools.rumen

Examples of org.apache.hadoop.tools.rumen.TaskInfo
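The snippets below appear to be drawn from Hadoop's Gridmix and Mumak test and simulation code. As a starting point, here is a minimal, self-contained sketch of the class itself. The five-argument constructor order (input bytes, input records, output bytes, output records, task memory) is inferred from the call sites in the examples below, with -1 marking an unknown memory requirement; the class name TaskInfoBasics is illustrative only.

    import org.apache.hadoop.tools.rumen.TaskInfo;

    public class TaskInfoBasics {
      public static void main(String[] args) {
        // Constructor order inferred from the call sites below:
        // (inputBytes, inputRecords, outputBytes, outputRecords, taskMemory).
        // -1 marks an unknown memory requirement.
        TaskInfo info = new TaskInfo(1 << 20, 1000, 1 << 19, 500, -1);
        System.out.println("in:  " + info.getInputBytes() + " bytes / "
            + info.getInputRecords() + " records");
        System.out.println("out: " + info.getOutputBytes() + " bytes / "
            + info.getOutputRecords() + " records");
      }
    }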


      // Observed (run) and expected (spec) per-task statistics. The run*
      // arrays and specInputRecords are used below but were cut from this
      // excerpt; their types are inferred from the assignments.
      long[] runInputBytes = new long[runTasks.length];
      int[] runInputRecords = new int[runTasks.length];
      long[] runOutputBytes = new long[runTasks.length];
      int[] runOutputRecords = new int[runTasks.length];
      long[] specInputRecords = new long[runTasks.length];
      long[] specInputBytes = new long[runTasks.length];
      long[] specOutputRecords = new long[runTasks.length];
      long[] specOutputBytes = new long[runTasks.length];

      for (int i = 0; i < runTasks.length; ++i) {
        final TaskInfo specInfo;
        final Counters counters = runTasks[i].getCounters();
        switch (type) {
          case MAP:
            runInputBytes[i] = counters.findCounter("FileSystemCounters",
                "HDFS_BYTES_READ").getValue() -
                counters.findCounter(SPLIT_RAW_BYTES).getValue();
            runInputRecords[i] =
              (int)counters.findCounter(MAP_INPUT_RECORDS).getValue();
            runOutputBytes[i] =
              counters.findCounter(MAP_OUTPUT_BYTES).getValue();
            runOutputRecords[i] =
              (int)counters.findCounter(MAP_OUTPUT_RECORDS).getValue();

            specInfo = spec.getTaskInfo(TaskType.MAP, i);
            specInputRecords[i] = specInfo.getInputRecords();
            specInputBytes[i] = specInfo.getInputBytes();
            specOutputRecords[i] = specInfo.getOutputRecords();
            specOutputBytes[i] = specInfo.getOutputBytes();
            System.out.printf(type + " SPEC: %9d -> %9d :: %5d -> %5d\n",
                 specInputBytes[i], specOutputBytes[i],
                 specInputRecords[i], specOutputRecords[i]);
            System.out.printf(type + " RUN:  %9d -> %9d :: %5d -> %5d\n",
                 runInputBytes[i], runOutputBytes[i],
                 runInputRecords[i], runOutputRecords[i]);
            break;
          case REDUCE:
            runInputBytes[i] = 0;
            runInputRecords[i] =
              (int)counters.findCounter(REDUCE_INPUT_RECORDS).getValue();
            runOutputBytes[i] =
              counters.findCounter("FileSystemCounters",
                  "HDFS_BYTES_WRITTEN").getValue();
            runOutputRecords[i] =
              (int)counters.findCounter(REDUCE_OUTPUT_RECORDS).getValue();


            specInfo = spec.getTaskInfo(TaskType.REDUCE, i);
            // There is no reliable counter for reduce input bytes. The
            // variable-length encoding of intermediate records and other noise
            // make this quantity difficult to estimate. The shuffle and spec
            // input bytes are included in debug output for reference, but are
            // not checked.
            specInputBytes[i] = 0;
            specInputRecords[i] = specInfo.getInputRecords();
            specOutputRecords[i] = specInfo.getOutputRecords();
            specOutputBytes[i] = specInfo.getOutputBytes();
            System.out.printf(type + " SPEC: (%9d) -> %9d :: %5d -> %5d\n",
                 specInfo.getInputBytes(), specOutputBytes[i],
                 specInputRecords[i], specOutputRecords[i]);
            System.out.printf(type + " RUN:  (%9d) -> %9d :: %5d -> %5d\n",
                 counters.findCounter(REDUCE_SHUFFLE_BYTES).getValue(),
                 runOutputBytes[i], runInputRecords[i], runOutputRecords[i]);
            break;
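The excerpt cuts off before the test's assertions. As a hedged sketch, the collected spec and run arrays could be compared with a relative tolerance, since observed byte counts rarely match the trace exactly; assertClose and the 10% slop are illustrative, not from the original test.

    // Sketch only: the original test's assertions are not shown above.
    final class SpecRunCheck {
      static void assertClose(String label, long spec, long run, double slop) {
        // Allow a relative error of |spec - run| <= slop * spec.
        if (Math.abs(spec - run) > slop * Math.max(1L, spec)) {
          throw new AssertionError(label + ": spec=" + spec + ", run=" + run);
        }
      }
    }
    // Usage, per task i: SpecRunCheck.assertClose("map output bytes",
    //     specOutputBytes[i], runOutputBytes[i], 0.1);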


   
    @Override
    public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
      switch (taskType) {
        case MAP:
          return new TaskInfo(m_bytesIn[taskNumber], m_recsIn[taskNumber],
              m_bytesOut[taskNumber], m_recsOut[taskNumber], -1);
        case REDUCE:
          return new TaskInfo(r_bytesIn[taskNumber], r_recsIn[taskNumber],
              r_bytesOut[taskNumber], r_recsOut[taskNumber], -1);
        default:
          throw new IllegalArgumentException("Not interested");
      }
    }

    // Signature completed from the rumen JobStory interface; the excerpt
    // began mid-parameter-list.
    @Override
    public TaskAttemptInfo getTaskAttemptInfo(
        TaskType taskType, int taskNumber, int taskAttemptNumber) {
      switch (taskType) {
        case MAP:
          return new MapTaskAttemptInfo(
            State.SUCCEEDED,
            new TaskInfo(
              m_bytesIn[taskNumber], m_recsIn[taskNumber],
              m_bytesOut[taskNumber], m_recsOut[taskNumber], -1),
            100);

        case REDUCE:
          return new ReduceTaskAttemptInfo(
            State.SUCCEEDED,
            new TaskInfo(
              r_bytesIn[taskNumber], r_recsIn[taskNumber],
              r_bytesOut[taskNumber], r_recsOut[taskNumber], -1),
            100, 100, 100);
      }
      throw new UnsupportedOperationException();
    }
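A hedged sketch of consuming the returned attempt description: story stands in for any rumen JobStory implementation such as the mock above, and the accessors getRunState(), getRuntime(), and getTaskInfo() are assumed from the rumen TaskAttemptInfo API.

    // Sketch only: "story" is any rumen JobStory implementation.
    TaskAttemptInfo attempt = story.getTaskAttemptInfo(TaskType.MAP, 0, 0);
    System.out.println("state=" + attempt.getRunState()
        + ", runtime=" + attempt.getRuntime() + " ms"
        + ", inputBytes=" + attempt.getTaskInfo().getInputBytes());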

    long expMapInputBytes = 0;  // restored: used below but cut from the excerpt
    long expMapOutputBytes = 0;
    long expMapInputRecs = 0;
    long expMapOutputRecs = 0;
    Map<String,Long> mapCounters = new HashMap<String,Long>();
    for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
      expMapInputBytes += mapTask.getInputBytes();
      expMapOutputBytes += mapTask.getOutputBytes();
      expMapInputRecs += mapTask.getInputRecords();
      expMapOutputRecs += mapTask.getOutputRecords();
    }
    mapCounters.put("MAP_INPUT_BYTES", expMapInputBytes);
    mapCounters.put("MAP_OUTPUT_BYTES", expMapOutputBytes);
    mapCounters.put("MAP_INPUT_RECS", expMapInputRecs);
    mapCounters.put("MAP_OUTPUT_RECS", expMapOutputRecs);

    long expReduceInputBytes = 0;  // restored: used below but cut from the excerpt
    long expReduceOutputBytes = 0;
    long expReduceInputRecs = 0;
    long expReduceOutputRecs = 0;
    Map<String,Long> reduceCounters = new HashMap<String,Long>();
    for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
      expReduceInputBytes += reduceTask.getInputBytes();
      expReduceOutputBytes += reduceTask.getOutputBytes();
      expReduceInputRecs += reduceTask.getInputRecords();
      expReduceOutputRecs += reduceTask.getOutputRecords();
    }
    reduceCounters.put("REDUCE_INPUT_BYTES", expReduceInputBytes);
    reduceCounters.put("REDUCE_OUTPUT_BYTES", expReduceOutputBytes);
    reduceCounters.put("REDUCE_INPUT_RECS", expReduceInputRecs);
    reduceCounters.put("REDUCE_OUTPUT_RECS", expReduceOutputRecs);
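The map and reduce loops above are identical except for the task type. A small helper, sketched under the assumption of the ZombieJob accessors used above (getNumberMaps(), getNumberReduces(), getTaskInfo(TaskType, int)) and of the org.apache.hadoop.mapreduce.TaskType enum, folds them into one; the class name ExpectedTotals is illustrative.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.mapreduce.TaskType;
    import org.apache.hadoop.tools.rumen.TaskInfo;
    import org.apache.hadoop.tools.rumen.ZombieJob;

    // Sketch only: aggregates expected I/O for all tasks of one type.
    final class ExpectedTotals {
      static Map<String, Long> of(ZombieJob job, TaskType type) {
        int count = type == TaskType.MAP
            ? job.getNumberMaps() : job.getNumberReduces();
        long bytesIn = 0, bytesOut = 0, recsIn = 0, recsOut = 0;
        for (int i = 0; i < count; i++) {
          TaskInfo t = job.getTaskInfo(type, i);
          bytesIn += t.getInputBytes();
          bytesOut += t.getOutputBytes();
          recsIn += t.getInputRecords();
          recsOut += t.getOutputRecords();
        }
        // Key names ("MAP_INPUT_BYTES", "REDUCE_INPUT_BYTES", ...) match
        // the ones built by hand in the loops above.
        Map<String, Long> totals = new HashMap<String, Long>();
        totals.put(type + "_INPUT_BYTES", bytesIn);
        totals.put(type + "_OUTPUT_BYTES", bytesOut);
        totals.put(type + "_INPUT_RECS", recsIn);
        totals.put(type + "_OUTPUT_RECS", recsOut);
        return totals;
      }
    }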

    // Totals restored for the excerpt: accumulated heap usage of the
    // original job's map and reduce tasks.
    long origJobMapsTHU = 0;
    long origJobReducesTHU = 0;

    if (isMemEmulOn) {

      for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
        TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
        if (mapTask.getResourceUsageMetrics().getHeapUsage() > 0) {
          origJobMapsTHU +=
                  mapTask.getResourceUsageMetrics().getHeapUsage();
        }
      }
      LOG.info("Total Heap Usage of Maps for original job: "
              + origJobMapsTHU);

      for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
        TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
        if (reduceTask.getResourceUsageMetrics().getHeapUsage() > 0) {
          origJobReducesTHU +=
                  reduceTask.getResourceUsageMetrics().getHeapUsage();
        }
      }
      LOG.info("Total Heap Usage of Reduces for original job: "
              + origJobReducesTHU);
     

    long mapTotalCPUUsage = 0;
    long reduceTotalCPUUsage = 0;
    Map<String,Long> resourceMetrics = new HashMap<String,Long>();

    for (int index = 0; index < zombieJob.getNumberMaps(); index++) {
      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
      if (mapTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
        mapTotalCPUUsage +=
            mapTask.getResourceUsageMetrics().getCumulativeCpuUsage();
      }
    }
    resourceMetrics.put("MAP", mapTotalCPUUsage);

    for (int index = 0; index < zombieJob.getNumberReduces(); index++) {
      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
      if (reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
        reduceTotalCPUUsage +=
            reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage();
      }
    }
    resourceMetrics.put("REDUCE", reduceTotalCPUUsage);
    return resourceMetrics;
  }
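Both the heap and the CPU loops skip non-positive samples, because traces captured without resource monitoring report zero for these fields. A hedged sketch that generalizes the guard; the MetricReader interface and ResourceSums class are introduced here for illustration only.

    // Sketch only: MetricReader is introduced here for illustration.
    interface MetricReader { long read(TaskInfo info); }

    final class ResourceSums {
      static long sumPositive(ZombieJob job, TaskType type, int count,
                              MetricReader reader) {
        long total = 0;
        for (int i = 0; i < count; i++) {
          long value = reader.read(job.getTaskInfo(type, i));
          // 0 or negative means the trace carried no sample for this task.
          if (value > 0) {
            total += value;
          }
        }
        return total;
      }
    }

Called once per task type with a reader that pulls getCumulativeCpuUsage() or getHeapUsage() out of getResourceUsageMetrics(), this reproduces the four loops above in one place.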


   

    // Downgrade the new-API attempt id to the old API expected by MapTask.
    org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
        org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
    Task task = new MapTask("dummyjobfile", taskIdOldApi, 0,
                            new TaskSplitIndex(), numSlotsRequired);
    // All byte/record counters (and the memory field) are 0.
    TaskInfo taskInfo = new TaskInfo(0, 0, 0, 0, 0);
    MapTaskAttemptInfo taskAttemptInfo =
        new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, mapRuntime);
    // Register a simulated launch action for this tracker at time mapStart.
    TaskTrackerAction action =
        new SimulatorLaunchTaskAction(task, taskAttemptInfo);
    heartbeats.get(mapStart).get(taskTrackerName).addTaskTrackerAction(action);
