Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.SleepJob.createJob()

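The snippets below are collected from Hadoop's MapReduce test sources. Judging from these call sites, createJob() takes six arguments: (numMapper, numReducer, mapSleepTime, mapSleepCount, reduceSleepTime, reduceSleepCount), where the sleep times are in milliseconds and each task's sleep is split into the given number of chunks. A minimal, self-contained sketch, assuming the SleepJob test class is on the classpath:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.SleepJob;

  public class SleepJobDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      SleepJob sleep = new SleepJob();
      sleep.setConf(conf);
      // 1 map and 1 reduce; each map sleeps 1000 ms in one chunk,
      // each reduce sleeps 1000 ms in one chunk.
      Job job = sleep.createJob(1, 1, 1000, 1, 1000, 1);
      // waitForCompletion(true) submits the job and prints progress.
      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }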

  // Helper for testJobDirCleanup below: runs a sleep job so the test can
  // verify that job directories are removed at the end of the job
  // (indirectly testing whether all tasktrackers got a KillJobAction).
  private JobID runSleepJob(JobConf conf) throws Exception {
    SleepJob sleep = new SleepJob();
    sleep.setConf(conf);
    Job job = sleep.createJob(1, 10, 1000, 1, 10000, 1);
    job.waitForCompletion(true);
    return job.getJobID();
  }

  public void testJobDirCleanup() throws Exception {
View Full Code Here
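Note the two submission styles used throughout these examples: the snippet above calls job.waitForCompletion(true), which submits the job and blocks while printing progress, whereas most of the snippets below call job.submit() and then poll the job's status or task reports themselves.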


      JobConf jConf = mr.createJobConf();
      FileInputFormat.setInputPaths(jConf, new Path[] {inDir});
      FileOutputFormat.setOutputPath(jConf, outDir);
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(jConf);
      Job job = sleepJob.createJob(1, 1, 0, 1, 0, 1);

      job.submit();
      JobID id = JobID.downgrade(job.getStatus().getJobID());
      JobInProgress jip = jobtracker.getJob(id);
     
View Full Code Here
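This snippet bridges the two MapReduce APIs: JobID.downgrade() converts the new-API org.apache.hadoop.mapreduce.JobID into the old-API org.apache.hadoop.mapred.JobID expected by JobTracker-era interfaces such as getJob(). A small sketch of the conversion (the helper name is illustrative):

  import org.apache.hadoop.mapreduce.Job;

  public class JobIdBridge {
    // org.apache.hadoop.mapred.JobID extends the new-API JobID, so
    // downgrade() simply rewraps the same identifier; the reverse
    // direction needs no conversion at all. The ID is only available
    // once the job has been submitted.
    static org.apache.hadoop.mapred.JobID oldApiId(Job job) {
      return org.apache.hadoop.mapred.JobID.downgrade(job.getJobID());
    }
  }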

    JobClient jClient = new JobClient(conf);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    // Start the job
    Job job = sleepJob.createJob(1, 1, 5000, 1, 1000, 1);
    job.submit();
    boolean TTOverFlowMsgPresent = false;
    while (true) {
      List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
      allTaskReports.addAll(Arrays.asList(jClient
View Full Code Here

    conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, 2 * 1024L);
    JobClient jClient = new JobClient(conf);
    SleepJob sleepJob = new SleepJob();
    sleepJob.setConf(conf);
    // Start the job
    Job job = sleepJob.createJob(1, 1, 100000, 1, 100000, 1);
    job.submit();
    boolean TTOverFlowMsgPresent = false;
    while (true) {
      List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
      allTaskReports.addAll(Arrays.asList(jClient
View Full Code Here
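Both of the last two snippets cut off inside the polling loop. One plausible completion, using only stable JobClient methods (getMapTaskReports(), getReduceTaskReports(), TaskReport.getDiagnostics()); the helper name and the boolean return in place of the TTOverFlowMsgPresent flag are illustrative assumptions:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.mapred.JobClient;
  import org.apache.hadoop.mapred.JobID;
  import org.apache.hadoop.mapred.TaskReport;

  public class DiagnosticPoller {
    // Poll map and reduce task reports until a diagnostic containing
    // the expected text appears, or the job completes without it.
    static boolean waitForDiagnostic(JobClient jClient, JobID id,
                                     String expected) throws Exception {
      while (true) {
        List<TaskReport> allTaskReports = new ArrayList<TaskReport>();
        allTaskReports.addAll(Arrays.asList(jClient.getMapTaskReports(id)));
        allTaskReports.addAll(Arrays.asList(jClient.getReduceTaskReports(id)));
        for (TaskReport report : allTaskReports) {
          for (String diagnostic : report.getDiagnostics()) {
            if (diagnostic.contains(expected)) {
              return true;
            }
          }
        }
        if (jClient.getJob(id).isComplete()) {
          return false;
        }
        Thread.sleep(1000);
      }
    }
  }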

    //which might become infinite.
    int count = 0;

    SleepJob job = new SleepJob();
    job.setConf(conf);
    Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);

    DistributedCache.createSymlink(conf);
    URI uri = URI.create(uriPath);
    DistributedCache.addCacheFile(uri, conf);
    JobConf jconf = new JobConf(conf);
View Full Code Here
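Here the sleep job is combined with the distributed cache: createSymlink() plus a "#linkname" fragment on the cache URI makes the cached file appear as ./linkname in each task's working directory. A condensed sketch of the setup, with a hypothetical HDFS path:

  import java.io.IOException;
  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.filecache.DistributedCache;

  public class CacheSetup {
    // Ship an HDFS file to every task through the distributed cache.
    // The path and link name below are hypothetical.
    static Configuration withCacheFile(Configuration conf) throws IOException {
      DistributedCache.createSymlink(conf);
      URI uri = URI.create("hdfs:///tmp/cachefile.txt#cachefile");
      DistributedCache.addCacheFile(uri, conf);
      return conf;
    }
  }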

    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);

    Job slpJob = job.createJob(1, 0, 100, 100, 100, 100);
    slpJob.submit();
    JobClient client = cluster.getJTClient().getClient();

    RunningJob rJob =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob
View Full Code Here
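JTProtocol, JTClient, and FinishTaskControlAction in this and the following snippets come from Herriot, Hadoop's system-test framework. configureControlActionForJob(conf) appears to mark the job so that its tasks wait for an explicit finish signal, which gives the test a window to inspect the running job (here via the RunningJob handle) before releasing its tasks.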

    ArrayList<String> taskTrackerCollection = new ArrayList<String>();

    do {
      SleepJob job = new SleepJob();
      job.setConf(conf);
      Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);

      DistributedCache.createSymlink(conf);
      URI uri = URI.create(uriPath);
      DistributedCache.addCacheFile(uri, conf);
      JobConf jconf = new JobConf(conf);
View Full Code Here

    boolean taskTrackerFound = false;

    do {
      SleepJob job = new SleepJob();
      job.setConf(conf);
      Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100);

      // Before starting the job, modify the distributed cache file
      String input = "This will be the content of\n" + "distributed cache\n";
      // Create the file at the cache path
      DataOutputStream file =
View Full Code Here
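This snippet is cut off while opening the output stream for the cache file. A plausible way to write such a file with the FileSystem API (the path is hypothetical):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class CacheFileWriter {
    // Overwrite the cache file in HDFS before the job starts, so the
    // tasks observe the modified content.
    static void writeFile(Configuration conf, String content) throws Exception {
      FileSystem fs = FileSystem.get(conf);
      Path path = new Path("/tmp/cachefile.txt");
      FSDataOutputStream out = fs.create(path, true); // true = overwrite
      out.writeBytes(content);
      out.close();
    }
  }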

  @Test
  public void testJobSubmission() throws Exception {
    Configuration conf = new Configuration(cluster.getConf());
    SleepJob job = new SleepJob();
    job.setConf(conf);
    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
    rJob = cluster.getJTClient().submitAndVerifyJob(rJob);
    cluster.getJTClient().verifyJobHistory(rJob.getJobID());
  }

  // @Test
View Full Code Here
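submitAndVerifyJob() and verifyJobHistory() are Herriot JTClient helpers. The same smoke test can be written without them against the stable API; a sketch assuming the default Configuration points at a running (mini)cluster:

  import static org.junit.Assert.assertTrue;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.SleepJob;
  import org.junit.Test;

  public class TestSleepJobSmoke {
    @Test
    public void testJobSubmission() throws Exception {
      Configuration conf = new Configuration();
      SleepJob sleepJob = new SleepJob();
      sleepJob.setConf(conf);
      Job job = sleepJob.createJob(1, 1, 100, 100, 100, 100);
      // waitForCompletion() returns true only if the job succeeded.
      assertTrue("sleep job failed", job.waitForCompletion(true));
    }
  }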

    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    FinishTaskControlAction.configureControlActionForJob(conf);
    SleepJob job = new SleepJob();
    job.setConf(conf);

    Job rJob = job.createJob(1, 1, 100, 100, 100, 100);
    JobClient client = cluster.getJTClient().getClient();
    rJob.submit();
    RunningJob rJob1 =
        client.getJob(org.apache.hadoop.mapred.JobID.downgrade(rJob.getJobID()));
    JobID id = rJob.getJobID();
View Full Code Here
