Package: org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.SleepJob$EmptySplit


      ugi = UserGroupInformation.getCurrentUser();
    }
    if (queueName != null) {
      clientConf.set(JobContext.QUEUE_NAME, queueName);
    }
    final SleepJob sleep = new SleepJob();
    sleep.setConf(clientConf);
   
    Job job = ugi.doAs(new PrivilegedExceptionAction<Job>() {
        public Job run() throws IOException {
          return sleep.createJob(numMappers, numReducers, mapSleepTime,
              (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime);
      }});
    if (shouldComplete) {
      job.waitForCompletion(false);
    } else {
View Full Code Here


  }
 
  private void runSleepJob(JobConf conf) throws Exception {
    String[] args = { "-m", "1", "-r", "1",
                      "-mt", "10", "-rt", "10" };
    ToolRunner.run(conf, new SleepJob(), args);
  }
View Full Code Here

      ugi = UserGroupInformation.getCurrentUser();
    }
    if (queueName != null) {
      clientConf.set(JobContext.QUEUE_NAME, queueName);
    }
    final SleepJob sleep = new SleepJob();
    sleep.setConf(clientConf);
   
    Job job = ugi.doAs(new PrivilegedExceptionAction<Job>() {
        public Job run() throws IOException {
          return sleep.createJob(numMappers, numReducers, mapSleepTime,
              (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime);
      }});
    if (shouldComplete) {
      job.waitForCompletion(false);
    } else {
View Full Code Here

      // Test that a task that times out does have a stack trace
      conf = mr.createJobConf();
      conf.setInt(JobContext.TASK_TIMEOUT, 10000);
      conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(conf);
      Job job = sleepJob.createJob(1, 0, 30000, 1,0, 0);
      job.setMaxMapAttempts(1);
      prevNumDumps = MockStackDumpTaskController.numStackDumps;
      job.waitForCompletion(true);
      checkForStackDump(true, prevNumDumps);
    } finally {
View Full Code Here

    }
  }

  private void runSleepJob(JobConf conf) throws Exception {
    String[] args = { "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" };
    ToolRunner.run(conf, new SleepJob(), args);
  }
View Full Code Here

     root.getChildren().get(0).getQueueSchedulingContext().setCapacityPercent(100);

    JobConf conf = getJobConf();
    conf.setSpeculativeExecution(false);
    conf.setNumTasksToExecutePerJvm(-1);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    Job job = sleepJob.createJob(3, 3, 1, 1, 1, 1);
    job.waitForCompletion(false);
    assertFalse(
      "The submitted job successfully completed",
      job.isSuccessful());
View Full Code Here

    JobConf conf = getJobConf();
    conf.setSpeculativeExecution(false);
    conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false");
    conf.setNumTasksToExecutePerJvm(-1);
    conf.setQueueName(queues[0]);
    SleepJob sleepJob1 = new SleepJob();
    sleepJob1.setConf(conf);
    jobs[0] = sleepJob1.createJob(1, 1, 1, 1, 1, 1);
    jobs[0].submit();

    JobConf conf2 = getJobConf();
    conf2.setSpeculativeExecution(false);
    conf2.setNumTasksToExecutePerJvm(-1);
    conf2.setQueueName(queues[1]);
    SleepJob sleepJob2 = new SleepJob();
    sleepJob2.setConf(conf2);
    jobs[1] = sleepJob2.createJob(3, 3, 5, 3, 5, 3);
    jobs[0].waitForCompletion(false);
    jobs[1].waitForCompletion(false);
    assertTrue(
      "Sleep job submitted to queue 1 is not successful", jobs[0]
        .isSuccessful());
View Full Code Here

  //runs a job with a single map and many reduces. The check is
  //to see whether the job directories are cleaned up at the
  //end of the job (indirectly testing whether all tasktrackers
  //got a KillJobAction).
  private JobID runSleepJob(JobConf conf) throws Exception {
    SleepJob sleep = new SleepJob();
    sleep.setConf(conf);
    Job job = sleep.createJob(1, 10, 1000, 1, 10000, 1);
    job.waitForCompletion(true);
    return job.getJobID();
  }
View Full Code Here

      Path inDir = new Path(testDir, "in-1");
      Path outDir = new Path(testDir, "out-1");
      JobConf jConf = mr.createJobConf();
      FileInputFormat.setInputPaths(jConf, new Path[] {inDir});
      FileOutputFormat.setOutputPath(jConf, outDir);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(jConf);
      Job job = sleepJob.createJob(1, 1, 0, 1, 0, 1);

      job.submit();
      JobID id = JobID.downgrade(job.getStatus().getJobID());
      JobInProgress jip = jobtracker.getJob(id);
     
View Full Code Here

    }
  }

  private int runSleepJob(JobConf conf) throws Exception {
    String[] args = { "-m", "3", "-r", "1", "-mt", "3000", "-rt", "1000" };
    return ToolRunner.run(conf, new SleepJob(), args);
  }
View Full Code Here

TOP

Related Classes of org.apache.hadoop.mapreduce.SleepJob$EmptySplit

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware#gmail.com.