Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.TaskAttemptID
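
A TaskAttemptID identifies one attempt at running one task of one job, and nests a TaskID, which in turn nests a JobID. Before the project excerpts below, here is a minimal, self-contained sketch of constructing and parsing ids against the Hadoop 2 mapreduce API; the identifier "local-jt", the numbers, and the class name TaskAttemptIdSketch are arbitrary illustration values, not taken from any of the excerpts.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskAttemptIdSketch {
  public static void main(String[] args) {
    // Fully specified: job tracker id, job number, task type, task number, attempt number.
    TaskAttemptID explicit = new TaskAttemptID("local-jt", 1, TaskType.MAP, 7, 0);
    System.out.println(explicit);                // attempt_local-jt_0001_m_000007_0

    // Built from an existing TaskID plus an attempt number.
    TaskID task = new TaskID(new JobID("local-jt", 1), TaskType.REDUCE, 3);
    TaskAttemptID fromTask = new TaskAttemptID(task, 2);
    System.out.println(fromTask);                // attempt_local-jt_0001_r_000003_2

    // Parsed back from the canonical string form.
    TaskAttemptID parsed = TaskAttemptID.forName(explicit.toString());
    System.out.println(parsed.equals(explicit)); // true
  }
}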


    List<InputSplit> splits = inputFormat.getSplits(context);
    assertEquals("splits length should be " + splitNumber, splits.size(), splitNumber);
    int readCount = 0;
    for (int i = 0; i < splits.size(); i++) {
      TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf,
          new TaskAttemptID());
      RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
      rr.initialize(splits.get(i), tac);
      while (rr.nextKeyValue()) {
        readCount++;
      }
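
The excerpt above goes through HCatalog's shim layer (ShimLoader...getHCatShim().createTaskAttemptContext) so the same test code runs on both Hadoop 1 and Hadoop 2. The following is a shim-free sketch of the same read-and-count pattern written directly against the Hadoop 2 mapreduce API; TextInputFormat and the class name CountRecordsSketch are illustrative substitutions, not part of the original test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public final class CountRecordsSketch {
  public static long countRecords(Configuration conf, Path input)
      throws IOException, InterruptedException {
    Job job = Job.getInstance(conf);
    FileInputFormat.addInputPath(job, input);

    TextInputFormat inputFormat = new TextInputFormat();
    long count = 0;
    for (InputSplit split : inputFormat.getSplits(job)) {
      // A default TaskAttemptID is enough when a reader is driven outside a real task.
      TaskAttemptContext tac =
          new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID());
      RecordReader<LongWritable, Text> reader = inputFormat.createRecordReader(split, tac);
      reader.initialize(split, tac);
      while (reader.nextKeyValue()) {
        count++;
      }
      reader.close();
    }
    return count;
  }
}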


  public Iterator<HCatRecord> read() throws HCatException {

    HCatInputFormat inpFmt = new HCatInputFormat();
    RecordReader<WritableComparable, HCatRecord> rr;
    try {
      TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, new TaskAttemptID());
      rr = inpFmt.createRecordReader(split, cntxt);
      rr.initialize(split, cntxt);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
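
The read() excerpt stops right after the reader is initialized. As a hypothetical continuation, the sketch below drains such a reader into an in-memory list; the class name DrainSketch is invented, and the HCatRecord import assumes the org.apache.hive.hcatalog package layout of newer HCatalog releases (older releases used org.apache.hcatalog).

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hive.hcatalog.data.HCatRecord;

final class DrainSketch {
  static List<HCatRecord> drain(RecordReader<WritableComparable, HCatRecord> rr)
      throws Exception {
    List<HCatRecord> records = new ArrayList<HCatRecord>();
    while (rr.nextKeyValue()) {
      records.add(rr.getCurrentValue());
    }
    rr.close();
    return records;
  }
}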

    int id = sp.getId();
    setVarsInConf(id);
    HCatOutputFormat outFormat = new HCatOutputFormat();
    TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
        conf, new TaskAttemptID(ShimLoader.getHadoopShims().getHCatShim().createTaskID(), id));
    OutputCommitter committer = null;
    RecordWriter<WritableComparable<?>, HCatRecord> writer;
    try {
      committer = outFormat.getOutputCommitter(cntxt);
      committer.setupTask(cntxt);
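
The excerpt above only reaches setupTask. The sketch below walks the rest of the task-side write lifecycle (get a writer, write, close, commit) driven by an explicit TaskAttemptID; it substitutes TextOutputFormat for HCatOutputFormat, assumes the job's output directory has already been configured, and the class name, "local-jt", and the record value are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public final class WriteLifecycleSketch {
  public static void writeOne(Configuration conf, Text record)
      throws IOException, InterruptedException {
    // The task number in the id keeps parallel writers from colliding on part-file names.
    TaskAttemptID attempt = new TaskAttemptID("local-jt", 1, TaskType.REDUCE, 0, 0);
    TaskAttemptContext ctx = new TaskAttemptContextImpl(conf, attempt);

    TextOutputFormat<NullWritable, Text> outFormat = new TextOutputFormat<NullWritable, Text>();
    OutputCommitter committer = outFormat.getOutputCommitter(ctx);
    committer.setupTask(ctx);

    RecordWriter<NullWritable, Text> writer = outFormat.getRecordWriter(ctx);
    try {
      writer.write(NullWritable.get(), record);
    } finally {
      writer.close(ctx);
    }
    if (committer.needsTaskCommit(ctx)) {
      committer.commitTask(ctx);
    }
  }
}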

    oc.commitJob(ojob);
  }

  private TaskAttemptContext createTaskAttemptContext(Configuration tconf) {
    Configuration conf = (tconf == null) ? (new Configuration()) : tconf;
    TaskAttemptID taskId = new TaskAttemptID();
    conf.setInt("mapred.task.partition", taskId.getId());
    conf.set("mapred.task.id", "attempt__0000_r_000000_" + taskId.getId());
    TaskAttemptContext rtaskContext = HCatMapRedUtil.createTaskAttemptContext(conf , taskId);
    return rtaskContext;
  }
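
The helper above hard-codes the "attempt__0000_r_000000_" prefix and uses the pre-Hadoop-2 property names. Below is a hypothetical variant that derives the string from an explicitly constructed id and also sets the Hadoop 2 equivalents of the two keys; the class name, "local-jt", and the zero-valued id components are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;

public final class TaskPropsSketch {
  static void setTaskProperties(Configuration conf) {
    TaskAttemptID taskId = new TaskAttemptID("local-jt", 0, TaskType.REDUCE, 0, 0);
    // Old (mapred-era) property names, as in the helper above.
    conf.setInt("mapred.task.partition", taskId.getTaskID().getId());
    conf.set("mapred.task.id", taskId.toString());
    // Hadoop 2 names for the same two settings.
    conf.setInt("mapreduce.task.partition", taskId.getTaskID().getId());
    conf.set("mapreduce.task.attempt.id", taskId.toString());
  }
}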

      job.getCredentials().addAll(parentContext.getCredentials());
      success = job.waitForCompletion(true);
      fs.delete(workDir, true);
      // We only clean up on success because a failure might have been caused by the target directory already existing
      if (localMode && success) {
        new ImporterOutputFormat().getOutputCommitter(HCatMapRedUtil.createTaskAttemptContext(conf, new TaskAttemptID())).commitJob(job);
      }
    } catch (InterruptedException e) {
      LOG.error("ImportSequenceFile Failed", e);
    } catch (ClassNotFoundException e) {
      LOG.error("ImportSequenceFile Failed", e);
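
The excerpt commits the job by hand only when running in local mode and only on success. As a hedged sketch (assuming a Hadoop release that provides OutputCommitter.abortJob(JobContext, JobStatus.State)), the symmetric failure path could notify the same committer that the job failed; the original code does not do this, so treat it as an illustration only.

      if (localMode && !success) {
        // Hypothetical counterpart to the commit above: mark the job as failed.
        new ImporterOutputFormat()
            .getOutputCommitter(HCatMapRedUtil.createTaskAttemptContext(conf, new TaskAttemptID()))
            .abortJob(job, org.apache.hadoop.mapreduce.JobStatus.State.FAILED);
      }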

    this.conf = null;
  }

  public void initialize(DoFn<?, ?> fn, Integer tid) {
    if (context == null || !Objects.equal(lastTID, tid)) {
      TaskAttemptID attemptID;
      if (tid != null) {
        TaskID taskId = new TaskID(new JobID(jobName, 0), false, tid);
        attemptID = new TaskAttemptID(taskId, 0);
        lastTID = tid;
      } else {
        attemptID = new TaskAttemptID();
        lastTID = null;
      }
      configureLocalFiles();
      context = TaskInputOutputContextFactory.create(getConfiguration(), attemptID, new SparkReporter(counters));
    }
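
The initialize(...) excerpt above builds its TaskID with the deprecated boolean constructor, where false means "not a map task". A minimal sketch of the TaskType-based equivalent; the class and method names are invented for illustration.

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public final class AttemptIdFactorySketch {
  // Equivalent of new TaskAttemptID(new TaskID(new JobID(jobName, 0), false, tid), 0):
  // the boolean false maps to TaskType.REDUCE.
  static TaskAttemptID reduceAttempt(String jobName, int tid) {
    TaskID taskId = new TaskID(new JobID(jobName, 0), TaskType.REDUCE, tid);
    return new TaskAttemptID(taskId, 0);
  }
}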

        if(reader != null){
            reader.close();
        }
        InputSplit curSplit = inpSplits.get(curSplitIndex);
        TaskAttemptContext tAContext = new TaskAttemptContext(conf,
                new TaskAttemptID());
        reader = inputFormat.createRecordReader(curSplit, tAContext);
        reader.initialize(curSplit, tAContext);
        // create a dummy pigsplit - other than the actual split, the other
        // params are really not needed here where we are just reading the
        // input completely
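
The direct new TaskAttemptContext(conf, new TaskAttemptID()) call above compiles only against Hadoop 1, where TaskAttemptContext is a concrete class. Against Hadoop 2, where it became an interface, the equivalent line is a sketch like the following (same variable names as the excerpt):

        TaskAttemptContext tAContext =
                new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(conf, new TaskAttemptID());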


    //
    // Old Hadoop API
    //
    public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException
    {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        List<org.apache.hadoop.mapreduce.InputSplit> newInputSplits = this.getSplits(tac);
        org.apache.hadoop.mapred.InputSplit[] oldInputSplits = new org.apache.hadoop.mapred.InputSplit[newInputSplits.size()];
        for (int i = 0; i < newInputSplits.size(); i++)
            oldInputSplits[i] = (ColumnFamilySplit)newInputSplits.get(i);
        return oldInputSplits;
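
The bridge above works because ColumnFamilySplit implements both the mapred and the mapreduce InputSplit types. The id classes bridge similarly: org.apache.hadoop.mapred.TaskAttemptID extends org.apache.hadoop.mapreduce.TaskAttemptID, and a new-API id can be converted with the static downgrade helper, as in this two-line sketch (assuming a Hadoop release that ships TaskAttemptID.downgrade):

        org.apache.hadoop.mapred.TaskAttemptID oldId =
                org.apache.hadoop.mapred.TaskAttemptID.downgrade(new TaskAttemptID());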

   */
  public MockMapContext(final Configuration configuration,
                        final List<Pair<KEYIN, VALUEIN>> in,
                        final Counters counters) {
    super(configuration,
        new TaskAttemptID("mrunit-jt", 0, TaskType.MAP, 0, 0),
        null, null, new MockOutputCommitter(), new MockReporter(counters), null);
    this.inputIter = in.iterator();
    this.output = new MockOutputCollector<KEYOUT, VALUEOUT>();
  }
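
The MRUnit mock context above is constructed with a fixed new TaskAttemptID("mrunit-jt", 0, TaskType.MAP, 0, 0). At runtime, that is the id a mapper reads back through its context, as in this small illustrative mapper (the class name is invented):

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.TaskAttemptID;

public class TagWithAttemptMapper extends Mapper<LongWritable, Text, Text, Text> {
  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // The same TaskAttemptID the framework (or a mock context) was built with.
    TaskAttemptID attempt = context.getTaskAttemptID();
    context.write(new Text(attempt.toString()), value);
  }
}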
