Examples of TTClient
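
The snippets on this page share one access pattern: get the TTClient handles from the system-test cluster, take each tracker's TTProtocol proxy, send a FinishTaskControlAction so the task is held until the test releases it, and then inspect the task's child-process tree. The condensed sketch below shows only that pattern; the helper name is hypothetical, and it assumes a Herriot system-test class with an MRCluster field named cluster and the same org.apache.hadoop.mapred / org.apache.hadoop.mapreduce.test.system imports the snippets use.

    // Minimal sketch (hypothetical helper) of the pattern repeated in the
    // snippets: hold a task on its TaskTracker and return the TTClient
    // hosting a non-cleanup task attempt.
    private TTClient findTrackerRunningTask(FinishTaskControlAction action)
        throws IOException {
      for (TTClient ttClient : cluster.getTTClients()) {
        TTProtocol tt = ttClient.getProxy();
        // Ask the tracker to hold the task until the test releases it.
        tt.sendAction(action);
        for (TTTaskInfo tttInfo : tt.getTasks()) {
          if (!tttInfo.isTaskCleanupTask()) {
            // The child process can now be checked with
            // tt.isProcessTreeAlive(tttInfo.getPid()).
            return ttClient;
          }
        }
      }
      return null;
    }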


Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    TaskInfo taskInfo = null;
    TaskID tID = null;
    TTTaskInfo [] ttTaskinfo = null;
    String pid = null;
    TTProtocol ttIns = null;
    TTClient ttClientIns = null;
    int counter = 0;

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("Message Display");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.FailedMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());   
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
   
    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }

    Assert.assertTrue("Map process is not alive before task fails.",
            ttIns.isProcessTreeAlive(pid));

    LOG.info("Waiting till the task is failed...");
    counter = 0;
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() ==
            TaskStatus.State.FAILED) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      counter++;
    }

    LOG.info("Waiting till the job is completed...");
    counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().isJobComplete()) {
        break;
      }
      UtilsForTests.waitFor(1000);
      jInfo = wovenClient.getJobInfo(id);
      counter ++;
    }
    Assert.assertTrue("Job has not been completed for 1 min.",
        counter != 60);
    ttIns = ttClientIns.getProxy();
    UtilsForTests.waitFor(2000);
    Assert.assertTrue("Map process is still alive after task has been failed.",
            !ttIns.isProcessTreeAlive(pid));
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    TaskInfo taskInfo = null;
    TaskID tID = null;
    TTTaskInfo [] ttTaskinfo = null;
    String pid = null;
    TTProtocol ttIns = null;
    TTClient ttClientIns = null;
    int counter = 0;
   
    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("Message Display");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrDisplayMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID tAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }


    Assert.assertTrue("Map process is not alive before task fails.",
        ttIns.isProcessTreeAlive(pid));

    String args[] = new String[] { "-fail-task", tAttID.toString() };
    int exitCode = runTool(jobConf, client, args);
    Assert.assertEquals("Exit Code:", 0, exitCode);

    LOG.info("Waiting till the task is failed...");
    taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
    counter = 0;
    while (counter < 60) {
      if (taskInfo.getTaskStatus().length > 0) {
        if (taskInfo.getTaskStatus()[0].getRunState() ==
            TaskStatus.State.FAILED) {
          break;
        }
      }
      UtilsForTests.waitFor(1000);
      taskInfo = wovenClient.getTaskInfo(taskInfo.getTaskID());
      counter ++;
    }
    counter = 0;
    LOG.info("Waiting till the job is completed...");
    while (counter < 60) {
      if (jInfo.getStatus().isJobComplete()) {
        break;
      }
      UtilsForTests.waitFor(1000);
      jInfo = wovenClient.getJobInfo(id);
      counter ++;
    }

    Assert.assertTrue("Job has not been completed for 1 min",
        counter != 60);
    ttIns = ttClientIns.getProxy();
    UtilsForTests.waitFor(1000);
    Assert.assertTrue("Map process is still alive after task has been failed.",
        !ttIns.isProcessTreeAlive(pid));
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

  public void testDirCleanupAfterTaskKilled() throws IOException,
          InterruptedException {
    TaskInfo taskInfo = null;
    boolean isTempFolderExists = false;
    String localTaskDir = null;
    TTClient ttClient = null;
    FileStatus filesStatus [] = null;
    Path inputDir = new Path("input");
    Path outputDir = new Path("output");
    Configuration conf = new Configuration(cluster.getConf());
    JobConf jconf = new JobConf(conf);
    jconf.setJobName("Word Count");
    jconf.setJarByClass(WordCount.class);
    jconf.setMapperClass(WordCount.MapClass.class);
    jconf.setCombinerClass(WordCount.Reduce.class);
    jconf.setReducerClass(WordCount.Reduce.class);
    jconf.setNumMapTasks(1);
    jconf.setNumReduceTasks(1);
    jconf.setOutputKeyClass(Text.class);
    jconf.setOutputValueClass(IntWritable.class);

    cleanup(inputDir, conf);
    cleanup(outputDir, conf);
    createInput(inputDir, conf);
    FileInputFormat.setInputPaths(jconf, inputDir);
    FileOutputFormat.setOutputPath(jconf, outputDir);
    RunningJob runJob = jobClient.submitJob(jconf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    Assert.assertTrue("Job has not been started for 1 min.",
       jtClient.isJobStarted(id));

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
       jtClient.isTaskStarted(taskInfo));

    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    String[] taskTrackers = taskInfo.getTaskTrackers();
    int counter = 0;
    TaskInfo prvTaskInfo = taskInfo;
    while (counter++ < 30) {
      if (taskTrackers.length > 0) {
        break;
      } else {
        UtilsForTests.waitFor(100);
        taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
        if (taskInfo == null) {
          taskInfo = prvTaskInfo;
        } else {
          prvTaskInfo = taskInfo;
        }
        taskTrackers = taskInfo.getTaskTrackers();
      }
    }
    Assert.assertTrue("TaskTracker is not found.", taskTrackers.length > 0);
    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);   
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
              + TaskTracker.getLocalTaskDir(userName,
                      id.toString(), taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        break;
      }
    }
   
    Assert.assertTrue("Task Attempt directory " +
            taskAttID + " has not been found while task was running.",
                    isTempFolderExists);
   
    RunningJob networkJob = jobClient.getJob(id);
    networkJob.killTask(taskAttID, false);
    ttClient.getProxy().sendAction(action);
    taskInfo = remoteJTClient.getTaskInfo(tID);
    while (taskInfo.getTaskStatus()[0].getRunState() ==
        TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      taskInfo = remoteJTClient.getTaskInfo(tID);
    }
    UtilsForTests.waitFor(1000);
    taskInfo = remoteJTClient.getTaskInfo(tID);
    Assert.assertTrue("Task status has not been changed to KILLED.",
       (TaskStatus.State.KILLED ==
       taskInfo.getTaskStatus()[0].getRunState()
       || TaskStatus.State.KILLED_UNCLEAN ==
       taskInfo.getTaskStatus()[0].getRunState()));
    taskInfo = remoteJTClient.getTaskInfo(tID);
    counter = 0;
    while (counter++ < 60) {
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length == 0) {
        break;
      } else {
        UtilsForTests.waitFor(100);
      }
View Full Code Here
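
The directory-cleanup tests above and below probe the task-attempt working directory the same way: build the attempt's local task directory under each mapred.local.dir reported by the TaskTracker and list it through the TTClient. A condensed sketch of that probe, with a hypothetical helper name and reusing the userName, JobID, and TaskAttemptID values the snippets already compute:

    // Hedged sketch of the local-directory probe used by the cleanup tests;
    // the helper name is illustrative, not part of the TTClient API.
    private boolean taskAttemptDirExists(TTClient ttClient, String userName,
        JobID id, TaskAttemptID taskAttID) throws IOException {
      for (String localDir : ttClient.getMapredLocalDirs()) {
        String localTaskDir = localDir + "/"
            + TaskTracker.getLocalTaskDir(userName, id.toString(),
                taskAttID.toString());
        // List the path on the remote TaskTracker; a non-empty listing means
        // the attempt's working directory still exists.
        FileStatus[] filesStatus = ttClient.listStatus(localTaskDir, true);
        if (filesStatus.length > 0) {
          return true;
        }
      }
      return false;
    }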

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

   * after failing the task.
   */
  @Test
  public void testDirCleanupAfterTaskFailed() throws IOException,
          InterruptedException {
    TTClient ttClient = null;
    FileStatus filesStatus [] = null;
    String localTaskDir = null;
    TaskInfo taskInfo = null;
    TaskID tID = null;
    boolean isTempFolderExists = false;
    conf = remoteJTClient.getDaemonConf();
    SleepJob job = new SleepJob();
    job.setConf(conf);
    JobConf jobConf = job.setupJobConf(1, 0, 10000,100, 10, 10);
    RunningJob runJob = jobClient.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    Assert.assertTrue("Job has not been started for 1 min.",
       jtClient.isJobStarted(id));

    JobStatus[] jobStatus = jobClient.getAllJobs();
    String userName = jobStatus[0].getUsername();
    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup() && taskinfo.getTaskID().isMap()) {
        taskInfo = taskinfo;
        break;
      }
    }
    Assert.assertTrue("Task has not been started for 1 min.",
       jtClient.isTaskStarted(taskInfo));
   
    tID = TaskID.downgrade(taskInfo.getTaskID());
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    String[] taskTrackers = taskInfo.getTaskTrackers();
    int counter = 0;
    TaskInfo prvTaskInfo = taskInfo;
    while (counter++ < 30) {
      if (taskTrackers.length > 0) {
        break;
      } else {
        UtilsForTests.waitFor(1000);
        taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
        if (taskInfo == null) {
          taskInfo = prvTaskInfo;
        } else {
          prvTaskInfo = taskInfo;
        }
        taskTrackers = taskInfo.getTaskTrackers();
      }
    }
    Assert.assertTrue("Task tracker not found.", taskTrackers.length > 0);
    String hostName = taskTrackers[0].split("_")[1];
    hostName = hostName.split(":")[0];
    ttClient = cluster.getTTClient(hostName);
    String localDirs[] = ttClient.getMapredLocalDirs();
    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
    for (String localDir : localDirs) {
      localTaskDir = localDir + "/"
              + TaskTracker.getLocalTaskDir(userName,
                      id.toString(), taskAttID.toString());
      filesStatus = ttClient.listStatus(localTaskDir, true);
      if (filesStatus.length > 0) {
        isTempFolderExists = true;
        break;
      }
    }   
   
    Assert.assertTrue("Task Attempt directory " +
            taskAttID + " has not been found while task was running.",
                    isTempFolderExists);
    boolean isFailTask = false;
    JobInfo jobInfo = remoteJTClient.getJobInfo(id);
    int MAX_MAP_TASK_ATTEMPTS = Integer.parseInt(
       jobConf.get("mapred.map.max.attempts"));
    if (!isFailTask) {
      TaskID taskId = TaskID.downgrade(taskInfo.getTaskID());
      TaskAttemptID tAttID = new TaskAttemptID(taskId,
          taskInfo.numFailedAttempts());
      while (taskInfo.numFailedAttempts() < MAX_MAP_TASK_ATTEMPTS) {
        RunningJob networkJob = jobClient.getJob(id);
        networkJob.killTask(taskAttID, true);
        taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
        taskAttID = new TaskAttemptID(taskId, taskInfo.numFailedAttempts());
      }
      isFailTask = true;
    }
   
    ttClient.getProxy().sendAction(action);
    taskInfo = remoteJTClient.getTaskInfo(tID);
    Assert.assertTrue("Task status has not been changed to FAILED.",
       TaskStatus.State.FAILED ==
       taskInfo.getTaskStatus()[0].getRunState()
       || TaskStatus.State.FAILED_UNCLEAN ==
       taskInfo.getTaskStatus()[0].getRunState());
    UtilsForTests.waitFor(1000);
    filesStatus = ttClient.listStatus(localTaskDir, true);
    Assert.assertTrue("Temporary folder has not been cleanup.",
            filesStatus.length == 0);
    UtilsForTests.waitFor(1000);
    jInfo = remoteJTClient.getJobInfo(id);
    LOG.info("Waiting till the job is completed...");
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    LOG.info("MAX_MAP_TASK_ATTEMPTS is : " + MAX_MAP_TASK_ATTEMPTS);

    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);

    TTClient tClient = null;
    TTClient[] ttClients = null;

    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());

    //Assert if jobInfo is null
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
    boolean isOneTaskStored = false;
    String sometaskpid = null;
    org.apache.hadoop.mapreduce.TaskAttemptID sometaskId = null;
    TTClient myCli = null;
    for(TaskInfo info : myTaskInfos) {
      if(!info.isSetupOrCleanup()) {
        String[] taskTrackers = info.getTaskTrackers();
        for(String taskTracker : taskTrackers) {
          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
          TTClient ttCli =  cluster.getTTClient(ttInfo.getStatus().getHost());
          TaskID taskId = info.getTaskID();
          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(taskId);
          Assert.assertNotNull(ttTaskInfo);
          Assert.assertNotNull(ttTaskInfo.getConf());
          Assert.assertNotNull(ttTaskInfo.getUser());
          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() >= 0.0);
          Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() <= 1.0);
          //Get the pid of the task attempt. The task need not have
          //reported the pid of the task by the time we are checking
          //the pid. So perform null check.
          String pid = ttTaskInfo.getPid();
          int i = 1;
          while (pid == null || pid.isEmpty()) {
            Thread.sleep(1000);
            LOG.info("Waiting for task to report its pid back");
            ttTaskInfo = ttCli.getProxy().getTask(taskId);
            pid = ttTaskInfo.getPid();
            if(i == 40) {
              Assert.fail("The task pid not reported for 40 seconds.");
            }
            i++;
          }
          if(!isOneTaskStored) {
            sometaskpid = pid;
            sometaskId = ttTaskInfo.getTaskStatus().getTaskID();
            myCli = ttCli;
            isOneTaskStored = true;
          }
          LOG.info("verified task progress to be between 0 and 1");
          State state = ttTaskInfo.getTaskStatus().getRunState();
          if (ttTaskInfo.getTaskStatus().getProgress() < 1.0 &&
              ttTaskInfo.getTaskStatus().getProgress() >0.0) {
            Assert.assertEquals(TaskStatus.State.RUNNING, state);
            LOG.info("verified run state as " + state);
          }
          FinishTaskControlAction action = new FinishTaskControlAction(
              org.apache.hadoop.mapred.TaskID.downgrade(info.getTaskID()));
          ttCli.getProxy().sendAction(action);
        }
      }
    }
    rJob.killJob();
    int i = 1;
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    for (TaskInfo tInfo : myTaskInfos) {
      if (!tInfo.isSetupOrCleanup()) {
        String[] taskTrackers = tInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
          TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
          Assert.assertNotNull("TTClient instance is null",ttCli);
          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
          Assert.assertNotNull("TTTaskInfo is null",ttTaskInfo);
          while (ttTaskInfo.getTaskStatus().getRunState() !=
                 TaskStatus.State.RUNNING) {
            Thread.sleep(100);
            ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
          }
          testPermissionWithTaskController(ttCli, conf, tInfo);
          FinishTaskControlAction action = new FinishTaskControlAction(TaskID
              .downgrade(tInfo.getTaskID()));
          for (TTClient cli : cluster.getTTClients()) {
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    TaskInfo taskInfo = null;
    long PER_TASK_LIMIT = 500L;
    Matcher mat = null;
    TTTaskInfo[] ttTaskinfo = null;
    String pid = null;
    TTClient ttClientIns = null;
    TTProtocol ttIns = null;
    TaskID tID = null;
    int counter = 0;

    String taskOverLimitPatternString =
        "TaskTree \\[pid=[0-9]*,tipID=.*\\] is "
        + "running beyond memory-limits. "
        + "Current usage : [0-9]*bytes. Limit : %sbytes. Killing task.";
   
    Pattern taskOverLimitPattern = Pattern.compile(String.format(
        taskOverLimitPatternString,
            String.valueOf(PER_TASK_LIMIT * 1024 * 1024L)));

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("String Appending");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrAppendMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);
    jobConf.setMemoryForMapTask(PER_TASK_LIMIT);
    jobConf.setMemoryForReduceTask(PER_TASK_LIMIT);
   
    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null",jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID tAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);
    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }
    Assert.assertTrue("Map process is not alive before task fails.",
        ttIns.isProcessTreeAlive(pid));
   
    while (ttIns.getTask(tID).getTaskStatus().getRunState()
        == TaskStatus.State.RUNNING) {
      UtilsForTests.waitFor(1000);
      ttIns = ttClientIns.getProxy();
    }

    String[] taskDiagnostics = runJob.getTaskDiagnostics(tAttID);
    Assert.assertNotNull("Task diagnostics is null", taskDiagnostics);

    for (String strVal : taskDiagnostics) {
      mat = taskOverLimitPattern.matcher(strVal);
      Assert.assertTrue("Taskover limit error message is not matched.",
          mat.find());
    }

    runJob.killJob();

    LOG.info("Waiting till the job is completed...");
    counter = 0;
    while (counter < 60) {
      if (jInfo.getStatus().isJobComplete()) {
        break;
      }
      UtilsForTests.waitFor(1000);
      jInfo = wovenClient.getJobInfo(id);
      counter ++;
    }
    Assert.assertTrue("Job has not been completed...", counter != 60);
    UtilsForTests.waitFor(1000);
    ttIns = ttClientIns.getProxy();
    ttIns.sendAction(action);
    UtilsForTests.waitFor(1000);
    Assert.assertTrue("Map process is still alive after task has been failed.",
        !ttIns.isProcessTreeAlive(pid));
  }
View Full Code Here

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

    TaskInfo taskInfo = null;
    long PER_TASK_LIMIT = 500L;
    Matcher mat = null;
    TTTaskInfo[] ttTaskinfo = null;
    String pid = null;
    TTClient ttClientIns = null;
    TTProtocol ttIns = null;
    TaskID tID = null;
    int counter = 0;
   
    String taskOverLimitPatternString =
        "TaskTree \\[pid=[0-9]*,tipID=.*\\] is "
        + "running beyond memory-limits. "
        + "Current usage : [0-9]*bytes. Limit : %sbytes. Killing task.";

    Pattern taskOverLimitPattern = Pattern.compile(String.format(
        taskOverLimitPatternString,
            String.valueOf(PER_TASK_LIMIT * 1024 * 1024L)));

    JobConf jobConf = new JobConf(conf);
    jobConf.setJobName("String Appending");
    jobConf.setJarByClass(GenerateTaskChildProcess.class);
    jobConf.setMapperClass(GenerateTaskChildProcess.StrAppendMapper.class);
    jobConf.setNumMapTasks(1);
    jobConf.setNumReduceTasks(0);
    cleanup(outputDir, conf);
    FileInputFormat.setInputPaths(jobConf, inputDir);
    FileOutputFormat.setOutputPath(jobConf, outputDir);
    jobConf.setMemoryForMapTask(PER_TASK_LIMIT);
    jobConf.setMemoryForReduceTask(PER_TASK_LIMIT);

    JTClient jtClient = cluster.getJTClient();
    JobClient client = jtClient.getClient();
    JTProtocol wovenClient = cluster.getJTClient().getProxy();
    RunningJob runJob = client.submitJob(jobConf);
    JobID id = runJob.getID();
    JobInfo jInfo = wovenClient.getJobInfo(id);
    Assert.assertNotNull("Job information is null", jInfo);

    Assert.assertTrue("Job has not been started for 1 min.",
        jtClient.isJobStarted(id));

    TaskInfo[] taskInfos = wovenClient.getTaskInfo(id);
    for (TaskInfo taskinfo : taskInfos) {
      if (!taskinfo.isSetupOrCleanup()) {
        taskInfo = taskinfo;
        break;
      }
    }
    Assert.assertNotNull("Task information is null.", taskInfo);

    Assert.assertTrue("Task has not been started for 1 min.",
        jtClient.isTaskStarted(taskInfo));

    tID = TaskID.downgrade(taskInfo.getTaskID());
    TaskAttemptID tAttID = new TaskAttemptID(tID,0);
    FinishTaskControlAction action = new FinishTaskControlAction(tID);

    Collection<TTClient> ttClients = cluster.getTTClients();
    for (TTClient ttClient : ttClients) {
      TTProtocol tt = ttClient.getProxy();
      tt.sendAction(action);
      ttTaskinfo = tt.getTasks();
      for (TTTaskInfo tttInfo : ttTaskinfo) {
        if (!tttInfo.isTaskCleanupTask()) {
          pid = tttInfo.getPid();
          ttClientIns = ttClient;
          ttIns = tt;
          break;
        }
      }
      if (ttClientIns != null) {
        break;
      }
    }
    Assert.assertTrue("Map process is not alive before task fails.",
        ttIns.isProcessTreeAlive(pid));

    Assert.assertTrue("Task did not stop " + tID,
        ttClientIns.isTaskStopped(tID));

    String[] taskDiagnostics = runJob.getTaskDiagnostics(tAttID);
    Assert.assertNotNull("Task diagnostics is null.", taskDiagnostics);

    for (String strVal : taskDiagnostics) {
      mat = taskOverLimitPattern.matcher(strVal);
      Assert.assertTrue("Taskover limit error message is not matched.",
          mat.find());
    }

    LOG.info("Waiting till the job is completed...");
    counter = 0;
    Assert.assertTrue("Job has not been completed...",
        cluster.getJTClient().isJobStopped(id));
    ttIns = ttClientIns.getProxy();
    ttIns.sendAction(action);
    UtilsForTests.waitFor(1000);
    Assert.assertTrue("Map process is still alive after task has been failed.",
        !ttIns.isProcessTreeAlive(pid));
  }
View Full Code Here
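
Both memory-limit snippets above verify the kill reason the same way: format the expected "running beyond memory-limits" message with the configured per-task limit (given in MB and converted to bytes) and match it against every diagnostic string returned for the failed attempt. A condensed sketch of that verification, with a hypothetical helper name and assuming the same java.util.regex and JUnit imports the snippets use:

    // Hedged sketch of the diagnostics check used by the memory-limit tests.
    // perTaskLimitMB corresponds to jobConf.setMemoryForMapTask(PER_TASK_LIMIT).
    private void assertOverLimitDiagnostics(RunningJob runJob,
        TaskAttemptID tAttID, long perTaskLimitMB) throws IOException {
      String taskOverLimitPatternString =
          "TaskTree \\[pid=[0-9]*,tipID=.*\\] is "
          + "running beyond memory-limits. "
          + "Current usage : [0-9]*bytes. Limit : %sbytes. Killing task.";
      Pattern taskOverLimitPattern = Pattern.compile(String.format(
          taskOverLimitPatternString,
          String.valueOf(perTaskLimitMB * 1024 * 1024L)));
      String[] taskDiagnostics = runJob.getTaskDiagnostics(tAttID);
      Assert.assertNotNull("Task diagnostics is null.", taskDiagnostics);
      for (String strVal : taskDiagnostics) {
        // Every diagnostic line for the attempt should carry the over-limit
        // message with the expected byte limit.
        Assert.assertTrue("Task over-limit error message is not matched.",
            taskOverLimitPattern.matcher(strVal).find());
      }
    }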

Examples of org.apache.hadoop.mapreduce.test.system.TTClient

      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);

      //counter for job Loop
      countLoop++;

      TTClient tClient = null;
      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
      LOG.info("jInfo is :" + jInfo);

      //Assert if jobInfo is null
      Assert.assertNotNull("jobInfo is null", jInfo);

      //Wait for the job to start running.
      count = 0;
      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
        UtilsForTests.waitFor(10000);
        count++;
        jInfo = wovenClient.getJobInfo(rJob.getID());
        //If the count goes beyond a point, fail the test; this avoids an
        //infinite loop under unforeseen circumstances.
        if (count > 10) {
          Assert.fail("Job has not reached running state for more than " +
            "100 seconds. Failing at this point");
        }
        }
      }

      LOG.info("job id is :" + rJob.getID().toString());

      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
             .getTaskInfo(rJob.getID());

      boolean distCacheFileIsFound;
      
      for (TaskInfo taskInfo : taskInfos) {
        distCacheFileIsFound = false;
        String[] taskTrackers = taskInfo.getTaskTrackers();
        for (String taskTracker : taskTrackers) {
          //Formatting tasktracker to get just its FQDN
          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
          LOG.info("taskTracker is :" + taskTracker);

          //This will be entered from the second job onwards
          if (countLoop > 1) {
            if (taskTracker != null) {
              continueLoop = taskTrackerCollection.contains(taskTracker);
            }
            if (!continueLoop) {
              break;
            }
          }

          //Collecting the tasktrackers
          if (taskTracker != null)
            taskTrackerCollection.add(taskTracker);

          //We have looped through enough times looking for tasks getting
          //submitted on the same tasktrackers. The same tasktracker may not
          //have been hit for subsequent jobs because there are many
          //tasktrackers, so the testcase has to stop here.
          if (countLoop > 2) {
            continueLoop = false;
          }

          tClient = cluster.getTTClient(taskTracker);

          //tClient maybe null because the task is already dead. Ex: setup
          if (tClient == null) {
            continue;
          }

          String[] localDirs = tClient.getMapredLocalDirs();
          int distributedFileCount = 0;
          //Go to every single path
          for (String localDir : localDirs) {
            //The public distributed cache is always stored under
            //mapred.local.dir/tasktracker/archive.
            localDir = localDir + Path.SEPARATOR +
                   TaskTracker.getPublicDistributedCacheDir();
            LOG.info("localDir is : " + localDir);

            //Get file status of all the directories
            //and files under that path.
            FileStatus[] fileStatuses = tClient.listStatus(localDir,
                true, true);
            for (FileStatus  fileStatus : fileStatuses) {
              Path path = fileStatus.getPath();
              LOG.info("path is :" + path.toString());
              //Checking if the received path ends with
View Full Code Here