Package org.apache.hadoop.tools.rumen

Examples of org.apache.hadoop.tools.rumen.JobStory


      if (loggingEnabled) {
        LOG.debug("Job '" + jobId.getId() + "' already present ");
      }
      return jobs.get(jobId).getStatus();
    }
    JobStory jobStory = SimulatorJobCache.get(jobId);
    if (jobStory == null) {
      throw new IllegalArgumentException("Job not found in SimulatorJobCache: "+jobId);
    }
    validateAndSetClock(jobStory.getSubmissionTime());
   
    SimulatorJobInProgress job = new SimulatorJobInProgress(jobId, jobSubmitDir, this,
                                                            this.conf,
                                                            jobStory);
    return addJob(jobId, job);
View Full Code Here


  void buildSplits(FilePool inputDir) throws IOException {
    long mapInputBytesTotal = 0L;
    long mapOutputBytesTotal = 0L;
    long mapOutputRecordsTotal = 0L;
    final JobStory jobdesc = getJobDesc();
    if (null == jobdesc) {
      return;
    }
    final int maps = jobdesc.getNumberMaps();
    final int reds = jobdesc.getNumberReduces();
    for (int i = 0; i < maps; ++i) {
      final TaskInfo info = jobdesc.getTaskInfo(TaskType.MAP, i);
      mapInputBytesTotal += info.getInputBytes();
      mapOutputBytesTotal += info.getOutputBytes();
      mapOutputRecordsTotal += info.getOutputRecords();
    }
    final double[] reduceRecordRatio = new double[reds];
    final double[] reduceByteRatio = new double[reds];
    for (int i = 0; i < reds; ++i) {
      final TaskInfo info = jobdesc.getTaskInfo(TaskType.REDUCE, i);
      reduceByteRatio[i] = info.getInputBytes() / (1.0 * mapOutputBytesTotal);
      reduceRecordRatio[i] =
        info.getInputRecords() / (1.0 * mapOutputRecordsTotal);
    }
    final InputStriper striper = new InputStriper(inputDir, mapInputBytesTotal);
    final List<InputSplit> splits = new ArrayList<InputSplit>();
    for (int i = 0; i < maps; ++i) {
      final int nSpec = reds / maps + ((reds % maps) > i ? 1 : 0);
      final long[] specBytes = new long[nSpec];
      final long[] specRecords = new long[nSpec];
      for (int j = 0; j < nSpec; ++j) {
        final TaskInfo info =
          jobdesc.getTaskInfo(TaskType.REDUCE, i + j * maps);
        specBytes[j] = info.getOutputBytes();
        specRecords[j] = info.getOutputRecords();
        if (LOG.isDebugEnabled()) {
          LOG.debug(String.format("SPEC(%d) %d -> %d %d %d", id(), i,
              i + j * maps, info.getOutputRecords(), info.getOutputBytes()));
        }
      }
      final TaskInfo info = jobdesc.getTaskInfo(TaskType.MAP, i);
      splits.add(new GridmixSplit(striper.splitFor(inputDir,
              info.getInputBytes(), 3), maps, i,
            info.getInputBytes(), info.getInputRecords(),
            info.getOutputBytes(), info.getOutputRecords(),
            reduceByteRatio, reduceRecordRatio, specBytes, specRecords));
View Full Code Here

            int noOfSlotsAvailable = loadStatus.numSlotsBackfill;
            LOG.info(" No of slots to be backfilled are " + noOfSlotsAvailable);

            for (int i = 0; i < noOfSlotsAvailable; i++) {
              try {
                final JobStory job = getNextJobFiltered();
                if (null == job) {
                  return;
                }
                //TODO: We need to take care of scenario when one map takes more
                //than 1 slot.
                i += job.getNumberMaps();

                submitter.add(
                  new GridmixJob(
                    conf, 0L, job, scratch, sequence.getAndIncrement()));
              } catch (IOException e) {
View Full Code Here

  }

  /**
   * Creates the thread that drives this factory: implementations read jobs
   * from the trace (via {@code getNextJobFiltered()}) and hand them to the
   * submitter. Supplied by concrete subclasses.
   *
   * @return the (not yet started) reader thread for this factory
   */
  protected abstract Thread createReaderThread() ;

  protected JobStory getNextJobFiltered() throws IOException {
    JobStory job;
    do {
      job = jobProducer.getNextJob();
    } while (job != null &&
      (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
        job.getSubmissionTime() < 0));
    return null == job ? null : new FilterJobStory(job) {
      @Override
      public TaskInfo getTaskInfo(TaskType taskType, int taskNumber) {
        return new MinTaskInfo(this.job.getTaskInfo(taskType, taskNumber));
      }
View Full Code Here

        LOG.info("START REPLAY @ " + initTime);
        long first = -1;
        long last = -1;
        while (!Thread.currentThread().isInterrupted()) {
          try {
            final JobStory job = getNextJobFiltered();
            if (null == job) {
              return;
            }
            if (first < 0) {
              first = job.getSubmissionTime();
            }
            final long current = job.getSubmissionTime();
            if (current < last) {
              LOG.warn("Job " + job.getJobID() + " out of order");
              continue;
            }
            last = current;
            submitter.add(
              new GridmixJob(
View Full Code Here

          return;
        }
        LOG.info("START SERIAL @ " + System.currentTimeMillis());
        GridmixJob prevJob;
        while (!Thread.currentThread().isInterrupted()) {
          final JobStory job;
          try {
            job = getNextJobFiltered();
            if (null == job) {
              return;
            }
            if (LOG.isDebugEnabled()) {
              LOG.debug(
                "Serial mode submitting job " + job.getName());
            }
            prevJob = new GridmixJob(
              conf, 0L, job, scratch,sequence.getAndIncrement());

            lock.lock();
            try {
              LOG.info(" Submitted the job " + prevJob);
              submitter.add(prevJob);
            } finally {
              lock.unlock();
            }
          } catch (IOException e) {
            error = e;
            //If submission of current job fails , try to submit the next job.
            return;
          }

          if (prevJob != null) {
            //Wait till previous job submitted is completed.
            lock.lock();
            try {
              while (true) {
                try {
                  jobCompleted.await();
                } catch (InterruptedException ie) {
                  LOG.error(
                    " Error in SerialJobFactory while waiting for job completion ",
                    ie);
                  return;
                }
                if (LOG.isDebugEnabled()) {
                  LOG.info(" job " + job.getName() + " completed ");
                }
                break;
              }
            } finally {
              lock.unlock();
View Full Code Here

              if (LOG.isDebugEnabled()) {
                LOG.debug("Cluster underloaded in run! Stressing...");
              }
              try {
                //TODO This in-line read can block submission for large jobs.
                final JobStory job = getNextJobFiltered();
                if (null == job) {
                  return;
                }
                if (LOG.isDebugEnabled()) {
                  LOG.debug("Job Selected: " + job.getJobID());
                }
                submitter.add(
                  jobCreator.createGridmixJob(
                    conf, 0L, job, scratch,
                    userResolver.getTargetUgi(
                      UserGroupInformation.createRemoteUser(job.getUser())),
                    sequence.getAndIncrement()));
                // TODO: We need to take care of scenario when one map/reduce
                // takes more than 1 slot.
               
                // Lock the loadjob as we are making updates
                int incompleteMapTasks = (int) calcEffectiveIncompleteMapTasks(
                                                 loadStatus.getMapCapacity(),
                                                 job.getNumberMaps(), 0.0f);
                loadStatus.decrementMapLoad(incompleteMapTasks);
               
                int incompleteReduceTasks =
                  (int) calcEffectiveIncompleteReduceTasks(
                          loadStatus.getReduceCapacity(),
                          job.getNumberReduces(), 0.0f);
                loadStatus.decrementReduceLoad(incompleteReduceTasks);
                 
                loadStatus.decrementJobLoad(1);
              } catch (IOException e) {
                LOG.error("Error while submitting the job ", e);
View Full Code Here

  /**
   * Creates the thread that drives this factory: implementations read jobs
   * from the trace and submit them. Supplied by concrete subclasses.
   *
   * @return the (not yet started) reader thread for this factory
   */
  protected abstract Thread createReaderThread() ;

  // gets the next job from the trace and does some bookkeeping for the same
  private JobStory getNextJobFromTrace() throws IOException {
    JobStory story = jobProducer.getNextJob();
    if (story != null) {
      ++numJobsInTrace;
    }
    return story;
  }
View Full Code Here

    }
    return story;
  }
 
  protected JobStory getNextJobFiltered() throws IOException {
    JobStory job = getNextJobFromTrace();
    while (job != null &&
      (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS ||
        job.getSubmissionTime() < 0)) {
      if (LOG.isDebugEnabled()) {
        String reason = null;
        if (job.getOutcome() != Pre21JobHistoryConstants.Values.SUCCESS) {
          reason = "STATE (" + job.getOutcome().name() + ") ";
        }
        if (job.getSubmissionTime() < 0) {
          reason += "SUBMISSION-TIME (" + job.getSubmissionTime() + ")";
        }
        LOG.debug("Ignoring job " + job.getJobID() + " from the input trace."
                  + " Reason: " + reason == null ? "N/A" : reason);
      }
      job = getNextJobFromTrace();
    }
    return null == job ? null : new FilterJobStory(job) {
View Full Code Here

    DebugJobProducer jobProducer = new DebugJobProducer(5, configuration);
    configuration.setBoolean(SleepJob.SLEEPJOB_MAPTASK_ONLY, true);

    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    JobStory story;
    int seq = 1;
    while ((story = jobProducer.getNextJob()) != null) {
      GridmixJob gridmixJob = JobCreator.SLEEPJOB.createGridmixJob(configuration, 0,
              story, new Path("ignored"), ugi, seq++);
      gridmixJob.buildSplits(null);
View Full Code Here

TOP

Related Classes of org.apache.hadoop.tools.rumen.JobStory

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle, Inc. Contact coftware#gmail.com.