Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.TaskAttemptContext
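
The excerpts below appear to be drawn from Hive, HCatalog, and Pig sources, and most of them build the context through a shim or utility class because TaskAttemptContext changed from a concrete class in Hadoop 1.x to an interface in Hadoop 2.x. For reference, a minimal sketch of direct construction on Hadoop 2.x and later (not taken from any of the excerpts below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    public class TaskAttemptContextSketch {
      // Hadoop 2.x+: TaskAttemptContext is an interface, and TaskAttemptContextImpl
      // is the concrete class, built from a Configuration and a TaskAttemptID.
      public static TaskAttemptContext newContext(Configuration conf) {
        return new TaskAttemptContextImpl(conf, new TaskAttemptID());
      }
    }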


    // Push the key-column predicate down into the HBase scan before reading.
    tableSplit = convertFilter(jobConf, scan, tableSplit, iKey,
      getStorageFormatOfKey(columnsMapping.get(iKey).mappingSpec,
      jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));
    setScan(scan);

    // Wrap the old-API JobConf in a new-API Job and let the Hadoop shim build a
    // TaskAttemptContext from its configuration and the reporter.
    Job job = new Job(jobConf);
    TaskAttemptContext tac = ShimLoader.getHadoopShims().newTaskAttemptContext(
        job.getConfiguration(), reporter);

    final org.apache.hadoop.mapreduce.RecordReader<ImmutableBytesWritable, Result>
    recordReader = createRecordReader(tableSplit, tac);
View Full Code Here
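
A hedged sketch of how the RecordReader created above would typically be driven; recordReader, tableSplit, and tac are the variables from the excerpt, and the key/value handling is illustrative only:

    // Sketch only: the standard new-API reader loop over HBase rows.
    recordReader.initialize(tableSplit, tac);
    while (recordReader.nextKeyValue()) {
      ImmutableBytesWritable rowKey = recordReader.getCurrentKey();
      Result row = recordReader.getCurrentValue();
      // ... consume the row ...
    }
    recordReader.close();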


    context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
    List<InputSplit> splits = inputFormat.getSplits(context);
    assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
    int readCount = 0;
    for (int i = 0; i < splits.size(); i++) {
      // A fresh TaskAttemptContext (with a throwaway TaskAttemptID) is created for each split.
      TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf,
          new TaskAttemptID());
      RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
      rr.initialize(splits.get(i), tac);
      while (rr.nextKeyValue()) {
        readCount++;
      }
View Full Code Here

  public Iterator<HCatRecord> read() throws HCatException {

    HCatInputFormat inpFmt = new HCatInputFormat();
    RecordReader<WritableComparable, HCatRecord> rr;
    try {
      // Build a context from the stored configuration and a fresh (dummy) TaskAttemptID,
      // then open and initialize the record reader for this split.
      TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(conf, new TaskAttemptID());
      rr = inpFmt.createRecordReader(split, cntxt);
      rr.initialize(split, cntxt);
    } catch (IOException e) {
      throw new HCatException(ErrorType.ERROR_NOT_INITIALIZED, e);
    } catch (InterruptedException e) {
View Full Code Here
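
A hedged sketch of consuming the Iterator<HCatRecord> that a read() like the one above returns; reader is a hypothetical HCatReader instance and the field access is illustrative only:

    Iterator<HCatRecord> records = reader.read();
    while (records.hasNext()) {
      HCatRecord record = records.next();
      Object firstField = record.get(0);   // illustrative field access
      // ... process the record ...
    }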

  public void write(Iterator<HCatRecord> recordItr) throws HCatException {

    int id = sp.getId();
    setVarsInConf(id);
    HCatOutputFormat outFormat = new HCatOutputFormat();
    // The attempt id embeds this writer's id, so parallel writers get distinct task attempts.
    TaskAttemptContext cntxt = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(
        conf, new TaskAttemptID(ShimLoader.getHadoopShims().getHCatShim().createTaskID(), id));
    OutputCommitter committer = null;
    RecordWriter<WritableComparable<?>, HCatRecord> writer;
    try {
      committer = outFormat.getOutputCommitter(cntxt);
View Full Code Here
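
The excerpt stops after obtaining the committer; below is a hedged sketch of how the task-side output sequence typically continues, reusing cntxt, outFormat, committer, writer, and recordItr from the excerpt. The calls are the standard org.apache.hadoop.mapreduce.OutputCommitter and RecordWriter ones, not the actual HCatWriter source:

    committer.setupTask(cntxt);
    writer = outFormat.getRecordWriter(cntxt);
    while (recordItr.hasNext()) {
      writer.write(null, recordItr.next());   // the key is unused in this sketch
    }
    writer.close(cntxt);
    if (committer.needsTaskCommit(cntxt)) {
      committer.commitTask(cntxt);
    }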

    OutputCommitter oc = oupy.getOutputCommitter(createTaskAttemptContext(ojob.getConfiguration()));
    oc.setupJob(ojob);

    for (InputSplit split : inpy.getSplits(ijob)){

      TaskAttemptContext rtaskContext = createTaskAttemptContext(ijob.getConfiguration());
      TaskAttemptContext wtaskContext = createTaskAttemptContext(ojob.getConfiguration());

      RecordReader<WritableComparable, HCatRecord> rr = inpy.createRecordReader(split, rtaskContext);
      rr.initialize(split, rtaskContext);

      OutputCommitter taskOc = oupy.getOutputCommitter(wtaskContext);
View Full Code Here

  private TaskAttemptContext createTaskAttemptContext(Configuration tconf) {
    Configuration conf = (tconf == null) ? (new Configuration()) : tconf;
    TaskAttemptID taskId = new TaskAttemptID();
    // Mirror the synthetic attempt into the old-style job properties that
    // downstream output formats and committers may still read.
    conf.setInt("mapred.task.partition", taskId.getId());
    conf.set("mapred.task.id", "attempt__0000_r_000000_" + taskId.getId());
    TaskAttemptContext rtaskContext = HCatMapRedUtil.createTaskAttemptContext(conf, taskId);
    return rtaskContext;
  }
View Full Code Here

        }
        if(reader != null){
            reader.close();
        }
        InputSplit curSplit = inpSplits.get(curSplitIndex);
        // Direct construction works here because this code targets Hadoop 1.x,
        // where TaskAttemptContext is still a concrete class rather than an interface.
        TaskAttemptContext tAContext = new TaskAttemptContext(conf,
                new TaskAttemptID());
        reader = inputFormat.createRecordReader(curSplit, tAContext);
        reader.initialize(curSplit, tAContext);
        // create a dummy pigsplit - other than the actual split, the other
        // params are really not needed here where we are just reading the
View Full Code Here

        // make a copy of the Context to use here - since in the same
        // task (map or reduce) we could have multiple stores, we should
        // make this copy so that the same context does not get over-written
        // by the different stores.
        this.context = new TaskAttemptContext(outputConf,
                context.getTaskAttemptID());
    }
View Full Code Here
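
On Hadoop 2.x, where TaskAttemptContext is an interface, the same copy would use the concrete TaskAttemptContextImpl; a minimal sketch assuming outputConf and context as in the excerpt:

        // Hadoop 2.x sketch of the same context copy, using the concrete implementation class.
        this.context = new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(
                outputConf, context.getTaskAttemptID());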

