Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.TaskAttemptContext
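
A TaskAttemptContext gives a single task attempt read access to its job Configuration, its TaskAttemptID, and a progress/status channel. In Hadoop 0.20/0.21 it is a concrete class constructed from a Configuration and a TaskAttemptID, as in most of the snippets below; from Hadoop 0.23/2.x onward it is an interface, with org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl as the standard implementation. A minimal construction sketch against the Hadoop 2 API (the job name "example" is an illustrative value, not from the snippets):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    public class TaskAttemptContextDemo {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            conf.set("mapreduce.job.name", "example"); // illustrative value
            // A fresh TaskAttemptID() is fine for tests; real attempts
            // get their ID from the framework.
            TaskAttemptContext context =
                    new TaskAttemptContextImpl(conf, new TaskAttemptID());
            System.out.println(context.getTaskAttemptID());
            System.out.println(context.getConfiguration().get("mapreduce.job.name"));
        }
    }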


      context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(context);
      // assertEquals takes (message, expected, actual); expected goes first
      assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
      int readCount = 0;
      for (int i = 0; i < splits.size(); i++) {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
        rr.initialize(splits.get(i), tac);
        while (rr.nextKeyValue()) {
          readCount++;
        }
      }
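The "mapred.max.split.size" key used above is the pre-Hadoop-2 name; under Hadoop 2 the same setting is "mapreduce.input.fileinputformat.split.maxsize", and FileInputFormat exposes a typed setter. A hedged equivalent of the configuration line above (the helper method and its name are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

    class SplitSizeConfig {
        // Caps the split size the Hadoop 2 way; equivalent in effect to
        // setting mapreduce.input.fileinputformat.split.maxsize directly.
        static Job newJobWithMaxSplit(Configuration conf, long maxSplitSize)
                throws IOException {
            Job job = Job.getInstance(conf);
            FileInputFormat.setMaxInputSplitSize(job, maxSplitSize);
            return job;
        }
    }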


        // gather one OutputCommitter per store, each driven through its
        // own per-store context (see setUpContext below)
        List<Pair<OutputCommitter, POStore>> committers =
            new ArrayList<Pair<OutputCommitter, POStore>>();
        for (POStore store : stores) {
            StoreFuncInterface sFunc = store.getStoreFunc();

            TaskAttemptContext updatedContext = setUpContext(context, store);
            try {
                committers.add(new Pair<OutputCommitter, POStore>(
                        sFunc.getOutputFormat().getOutputCommitter(
                                updatedContext), store));
            } catch (InterruptedException e) {

    private TaskAttemptContext setUpContext(TaskAttemptContext context,
            POStore store) throws IOException {
        // Setup UDFContext so StoreFunc can make use of it
        MapRedUtil.setupUDFContext(context.getConfiguration());
        // make a copy of the context so that the actions after this call
        // do not end up updating the same context
        TaskAttemptContext contextCopy = new TaskAttemptContext(
                context.getConfiguration(), context.getTaskAttemptID());
       
        // call setLocation() on the storeFunc so that any side effects
        // needed by the OutputCommitter, such as setting mapred.output.dir
        // on the Configuration in the context, take place beforehand
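The copy-before-mutate pattern above matters because several committers share one task context: each store gets its own copy, so setLocation() side effects do not leak between stores. A minimal sketch of the same pattern against the Hadoop 2 interface (TaskAttemptContextImpl and the deep Configuration copy are the assumed ingredients; the helper class is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    class ContextCopier {
        // Returns a context whose Configuration can be mutated freely
        // without touching the original context's Configuration.
        static TaskAttemptContext copyOf(TaskAttemptContext context) {
            Configuration confCopy =
                    new Configuration(context.getConfiguration());
            return new TaskAttemptContextImpl(confCopy,
                    context.getTaskAttemptID());
        }
    }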

    @Override
    public void abortTask(TaskAttemptContext context) throws IOException {
        if (context.getTaskAttemptID().isMap()) {
            for (Pair<OutputCommitter, POStore> mapCommitter :
                mapOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        mapCommitter.second);
                mapCommitter.first.abortTask(updatedContext);
            }
        } else {
            for (Pair<OutputCommitter, POStore> reduceCommitter :
                reduceOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        reduceCommitter.second);
                reduceCommitter.first.abortTask(updatedContext);
            }
        }
    }

    @Override
    public void commitTask(TaskAttemptContext context) throws IOException {
        if (context.getTaskAttemptID().isMap()) {
            for (Pair<OutputCommitter, POStore> mapCommitter :
                mapOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        mapCommitter.second);
                mapCommitter.first.commitTask(updatedContext);
            }
        } else {
            for (Pair<OutputCommitter, POStore> reduceCommitter :
                reduceOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        reduceCommitter.second);
                reduceCommitter.first.commitTask(updatedContext);
            }
        }
    }

    @Override
    public boolean needsTaskCommit(TaskAttemptContext context)
            throws IOException {
        boolean needCommit = false;
        if (context.getTaskAttemptID().isMap()) {
            for (Pair<OutputCommitter, POStore> mapCommitter :
                mapOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        mapCommitter.second);
                // || short-circuits: once one committer needs a commit,
                // the remaining committers are not queried
                needCommit = needCommit ||
                        mapCommitter.first.needsTaskCommit(updatedContext);
            }
            return needCommit;
        } else {
            for (Pair<OutputCommitter, POStore> reduceCommitter :
                reduceOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        reduceCommitter.second);
                needCommit = needCommit ||
                        reduceCommitter.first.needsTaskCommit(updatedContext);
            }
            return needCommit;
        }
    }

    @Override
    public void setupTask(TaskAttemptContext context) throws IOException {
        if (context.getTaskAttemptID().isMap()) {
            for (Pair<OutputCommitter, POStore> mapCommitter :
                mapOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        mapCommitter.second);
                mapCommitter.first.setupTask(updatedContext);
            }
        } else {
            for (Pair<OutputCommitter, POStore> reduceCommitter :
                reduceOutputCommitters) {
                TaskAttemptContext updatedContext = setUpContext(context,
                        reduceCommitter.second);
                reduceCommitter.first.setupTask(updatedContext);
            }
        }
    }
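Together, the methods above mirror the standard OutputCommitter lifecycle. A hedged sketch of how a framework or test harness drives a committer through a TaskAttemptContext, against the Hadoop 2 API; the TextOutputFormat choice and the local output path are illustrative, not from the original:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.OutputFormat;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    class CommitterLifecycleDemo {
        static void run(Configuration conf) throws Exception {
            // illustrative local output directory, not from the original
            conf.set("mapreduce.output.fileoutputformat.outputdir",
                    "file:///tmp/committer-demo");
            TaskAttemptContext taskContext =
                    new TaskAttemptContextImpl(conf, new TaskAttemptID());
            OutputFormat<Object, Object> outputFormat =
                    new TextOutputFormat<Object, Object>();
            OutputCommitter committer =
                    outputFormat.getOutputCommitter(taskContext);

            committer.setupJob(taskContext);       // once per job
            committer.setupTask(taskContext);      // once per task attempt
            if (committer.needsTaskCommit(taskContext)) {
                committer.commitTask(taskContext); // promote attempt output
            }
            // on failure the framework calls abortTask()/abortJob() instead
            committer.commitJob(taskContext);      // finalize the job output
        }
    }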

    if (newApiCommitter) {
      org.apache.hadoop.mapreduce.v2.api.records.TaskId taskID = MRBuilderUtils
          .newTaskId(jobId, 0, TaskType.MAP);
      org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = MRBuilderUtils
          .newTaskAttemptId(taskID, 0);
      TaskAttemptContext taskContext = new TaskAttemptContextImpl(conf,
          TypeConverter.fromYarn(attemptID));
      OutputFormat outputFormat;
      try {
        outputFormat = ReflectionUtils.newInstance(taskContext
            .getOutputFormatClass(), conf);
        committer = outputFormat.getOutputCommitter(taskContext);
      } catch (Exception e) {
        throw new YarnRuntimeException(e);
      }

    //
    // Old Hadoop API
    //
    public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException
    {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        List<org.apache.hadoop.mapreduce.InputSplit> newInputSplits = this.getSplits(tac);
        org.apache.hadoop.mapred.InputSplit[] oldInputSplits = new org.apache.hadoop.mapred.InputSplit[newInputSplits.size()];
        for (int i = 0; i < newInputSplits.size(); i++)
            oldInputSplits[i] = (ColumnFamilySplit)newInputSplits.get(i);
        return oldInputSplits;
    }
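
The cast above works because ColumnFamilySplit is written against both APIs at once: it extends the new-API InputSplit class and also implements the old-API InputSplit interface. A minimal sketch of that dual-API trick (DualApiSplit and its fields are illustrative, not Cassandra's actual class):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    // Extends the new-API abstract class and implements the old-API
    // interface, so one object can be handed to either framework.
    class DualApiSplit extends org.apache.hadoop.mapreduce.InputSplit
            implements org.apache.hadoop.mapred.InputSplit {
        private long length;
        private String[] locations = new String[0];

        @Override
        public long getLength() { return length; }           // both APIs

        @Override
        public String[] getLocations() { return locations; } // both APIs

        // Old-API splits must be Writable.
        @Override
        public void write(DataOutput out) throws IOException {
            out.writeLong(length);
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            length = in.readLong();
        }
    }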

    public org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, IColumn>> getRecordReader(org.apache.hadoop.mapred.InputSplit split, JobConf jobConf, final Reporter reporter) throws IOException
    {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID)))
        {
            @Override
            public void progress()
            {
                reporter.progress();
            }
        };
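Subclassing works here because the pre-Hadoop-2 TaskAttemptContext is a concrete class. Under Hadoop 2, where TaskAttemptContext is an interface, the same progress-forwarding trick can be done by subclassing TaskAttemptContextImpl; a hedged sketch (the wrapper method is illustrative, and the Reporter wiring is assumed to match the snippet above):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.Reporter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    class ProgressForwarding {
        static TaskAttemptContext wrap(JobConf jobConf, TaskAttemptID id,
                final Reporter reporter) {
            // Forward new-API progress() calls to the old-API Reporter.
            return new TaskAttemptContextImpl(jobConf, id) {
                @Override
                public void progress() {
                    reporter.progress();
                }
            };
        }
    }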
