Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.JobContext


/**
 * This class wraps all static methods. PigMapReduce, PigMapBase, and MiniCluster wrap the
 * Hadoop-version-dependent implementations of PigGenericMapReduce, PigGenericMapBase and
 * MiniGenericCluster.
 */
public class HadoopShims {
    static public JobContext cloneJobContext(JobContext original) throws IOException, InterruptedException {
        JobContext newContext = new JobContext(original.getConfiguration(), original.getJobID());
        return newContext;
    }
   
    static public JobContext createJobContext(Configuration conf,
            JobID jobId) {
        JobContext newJobContext = new JobContext(
                conf, jobId);
        return newJobContext;
    }
}
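As a quick usage sketch, here is hypothetical caller code for these two shim methods (it assumes the pre-0.21 mapreduce API, where the JobContext constructor used above is public; the class and key names are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;

public class ShimUsageExample {
    public static void main(String[] args)
            throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        // Build the context through the shim rather than calling the
        // version-specific JobContext constructor directly.
        JobContext context = HadoopShims.createJobContext(conf, new JobID());
        // Clone before mutating, so the original context stays untouched
        // (the constructor copies the Configuration it is handed).
        JobContext copy = HadoopShims.cloneJobContext(context);
        copy.getConfiguration().set("example.key", "example.value");
    }
}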

public class TestRCFileOutputStorageDriver extends TestCase {

  public void testConversion() throws IOException {
    Configuration conf = new Configuration();
    JobContext jc = new JobContext(conf, new JobID());

    HowlSchema schema = buildHiveSchema();
    HowlInputStorageDriver isd = new RCFileInputDriver();

    isd.setOriginalSchema(jc, schema);

    writer.close();
    BytesRefArrayWritable[] bytesArr = new BytesRefArrayWritable[]{bytes,bytes2};

    HowlSchema schema = buildHiveSchema();
    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, schema);
    sd.setOutputSchema(jc, schema);
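The two snippets that follow repeat this same driver setup but vary the schema handed to setOutputSchema: the first requests a pruned projection of the Hive schema, the second a reordered one with a partition value injected through setPartitionValues.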

    writer.append(bytes2);
    writer.close();
    BytesRefArrayWritable[] bytesArr = new BytesRefArrayWritable[]{bytes,bytes2};

    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildPrunedSchema());

    sd.initialize(jc, getProps());
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split, tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getPrunedRecords();
    for (int j = 0; j < 2; j++) {

    writer.append(bytes2);
    writer.close();
    BytesRefArrayWritable[] bytesArr = new BytesRefArrayWritable[]{bytes,bytes2};

    RCFileInputDriver sd = new RCFileInputDriver();
    JobContext jc = new JobContext(conf, new JobID());
    sd.setInputPath(jc, file.toString());
    InputFormat<?,?> iF = sd.getInputFormat(null);
    InputSplit split = iF.getSplits(jc).get(0);
    sd.setOriginalSchema(jc, buildHiveSchema());
    sd.setOutputSchema(jc, buildReorderedSchema());

    sd.initialize(jc, getProps());
    Map<String,String> map = new HashMap<String,String>(1);
    map.put("part1", "first-part");
    sd.setPartitionValues(jc, map);
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
        jc.getConfiguration().get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR));
    TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
    RecordReader<?,?> rr = iF.createRecordReader(split, tac);
    rr.initialize(split, tac);
    HowlRecord[] tuples = getReorderedCols();
    for (int j = 0; j < 2; j++) {

      writer.close();

      RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat =
          new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
      Configuration jobConf = new Configuration(cloneConf);
      jobConf.set("mapred.input.dir", testDir.toString());
      JobContext context = new Job(jobConf);
      context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
      List<InputSplit> splits = inputFormat.getSplits(context);
      // assertEquals expects the expected value before the actual one.
      assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
      int readCount = 0;
      for (int i = 0; i < splits.size(); i++) {
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
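The listing breaks off inside this loop. As a hedged sketch of how the body might continue (illustrative only; it assumes nothing beyond the standard mapreduce RecordReader contract and is not the original test code):

        // Hypothetical continuation: drain the split through a RecordReader
        // and count every record seen across all splits.
        RecordReader<LongWritable, BytesRefArrayWritable> rr =
            inputFormat.createRecordReader(splits.get(i), tac);
        rr.initialize(splits.get(i), tac);
        while (rr.nextKeyValue()) {
          readCount++;
        }
        rr.close();
      }
      // ... followed by an assertion on readCount ...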

   
    static JobContext setUpContext(JobContext context,
            POStore store) throws IOException {
        // make a copy of the context so that the actions after this call
        // do not end up updating the same context
        JobContext contextCopy = new JobContext(
                context.getConfiguration(), context.getJobID());
       
        // call setLocation() on the storeFunc so that if any side effects,
        // like setting map.output.dir on the Configuration in the context,
        // are needed by the OutputCommitter, those actions
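The copy matters because the pre-0.21 JobContext constructor wraps the Configuration it is given in a fresh JobConf, copying its properties. A minimal standalone illustration of that isolation (hypothetical example code, not part of the Pig source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.JobID;

public class ContextCopyIsolation {
    public static void main(String[] args) {
        JobContext original = new JobContext(new Configuration(), new JobID());
        JobContext copy = new JobContext(
                original.getConfiguration(), original.getJobID());
        // Mutate only the copy's Configuration...
        copy.getConfiguration().set("mapred.output.dir", "/tmp/out");
        // ...and the original is unaffected, which is exactly why
        // setUpContext() copies the context before the store touches it.
        assert original.getConfiguration().get("mapred.output.dir") == null;
    }
}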

   
    @Override
    public void cleanupJob(JobContext context) throws IOException {
        // call clean up on all map and reduce committers
        for (Pair<OutputCommitter, POStore> mapCommitter : mapOutputCommitters) {           
            JobContext updatedContext = setUpContext(context,
                    mapCommitter.second);
            storeCleanup(mapCommitter.second, updatedContext.getConfiguration());
            mapCommitter.first.cleanupJob(updatedContext);
        }
        for (Pair<OutputCommitter, POStore> reduceCommitter :
            reduceOutputCommitters) {           
            JobContext updatedContext = setUpContext(context,
                    reduceCommitter.second);
            storeCleanup(reduceCommitter.second, updatedContext.getConfiguration());
            reduceCommitter.first.cleanupJob(updatedContext);
        }
      
    }

   
    @Override
    public void setupJob(JobContext context) throws IOException {
        // call set up on all map and reduce committers
        for (Pair<OutputCommitter, POStore> mapCommitter : mapOutputCommitters) {
            JobContext updatedContext = setUpContext(context,
                    mapCommitter.second);
            mapCommitter.first.setupJob(updatedContext);
        }
        for (Pair<OutputCommitter, POStore> reduceCommitter :
            reduceOutputCommitters) {
            JobContext updatedContext = setUpContext(context,
                    reduceCommitter.second);
            reduceCommitter.first.setupJob(updatedContext);
        }
    }
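For orientation, a hedged sketch of the order in which a MapReduce runtime drives a committer like this one; the harness below is illustrative only, not Hadoop or Pig code:

import java.io.IOException;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;

public class CommitterLifecycleSketch {
    // The framework calls setupJob() once before any task runs and
    // cleanupJob() once after the job finishes, which is why the composite
    // committer above fans each call out to every store's committer.
    static void runJob(OutputCommitter committer, JobContext context)
            throws IOException {
        committer.setupJob(context);
        // ... map and reduce tasks execute here, each driving the
        // committer's setupTask()/commitTask() methods ...
        committer.cleanupJob(context);
    }
}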
