Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.JobContext
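
All of the snippets below share one idea: a JobContext is a read-only view of a job's configuration and JobID, and it can be constructed outside of a running job so that framework hooks such as getSplits() or checkOutputSpecs() can be driven directly. A minimal sketch, assuming the Hadoop 1.x API where JobContext is a concrete class (Hadoop 2.x turns it into an interface backed by org.apache.hadoop.mapreduce.task.JobContextImpl):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobID;

    public class JobContextSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            conf.set("example.key", "example.value");   // hypothetical property

            // Hadoop 1.x: JobContext has a (Configuration, JobID) constructor.
            JobContext ctx = new JobContext(conf, new JobID("local", 0));

            // The context exposes the job id and (a copy of) the configuration.
            System.out.println(ctx.getJobID());
            System.out.println(ctx.getConfiguration().get("example.key"));
        }
    }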


    private void checkOutputSpecsHelper(List<POStore> stores, JobContext
            jobcontext) throws IOException, InterruptedException {
        for (POStore store : stores) {
            // make a copy of the original JobContext so that
            // each OutputFormat gets a different copy
            JobContext jobContextCopy = new JobContext(
                    jobcontext.getConfiguration(), jobcontext.getJobID());
           
            // set output location
            PigOutputFormat.setLocation(jobContextCopy, store);
           
View Full Code Here
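
The important detail above is the copy: PigOutputFormat.setLocation writes store-specific settings into the copied context's configuration, and each OutputFormat must see only its own settings. A rough analogue of the same isolation pattern using stock Hadoop classes (the output paths and format here are assumptions, not Pig's actual code):

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
    import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

    public class CheckOutputSpecsSketch {
        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            List<Path> outputs = Arrays.asList(new Path("/tmp/out1"), new Path("/tmp/out2"));  // hypothetical

            for (Path out : outputs) {
                // new Job(conf) copies the configuration, so per-output settings stay isolated.
                Job jobCopy = new Job(conf);
                FileOutputFormat.setOutputPath(jobCopy, out);
                // Job is itself a JobContext, so it can be handed straight to checkOutputSpecs.
                new TextOutputFormat<LongWritable, Text>().checkOutputSpecs(jobCopy);
            }
        }
    }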


     * @param job
     * @param st
     * @throws IOException
     */
    private void storeSchema(Job job, POStore st) throws IOException {
        JobContext jc = new JobContext(job.getJobConf(),
                new org.apache.hadoop.mapreduce.JobID());
        JobContext updatedJc = PigOutputCommitter.setUpContext(jc, st);
        PigOutputCommitter.storeCleanup(st, updatedJc.getConfiguration());
    }
View Full Code Here

      getStorageFormatOfKey(columnsMapping.get(iKey).mappingSpec,
      jobConf.get(HBaseSerDe.HBASE_TABLE_DEFAULT_STORAGE_TYPE, "string")));

    setScan(scan);
    Job job = new Job(jobConf);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);
    Path [] tablePaths = FileInputFormat.getInputPaths(jobContext);

    List<org.apache.hadoop.mapreduce.InputSplit> splits =
      super.getSplits(jobContext);
    InputSplit [] results = new InputSplit[splits.size()];
View Full Code Here
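
The shim call above only hides the class-versus-interface difference between Hadoop versions; the essential part is that the new-API FileInputFormat reads its input paths from the JobContext's configuration, and getSplits() is driven by the same context. A simplified sketch using only stock new-API types (the input path is an assumption):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

    public class GetSplitsSketch {
        public static void main(String[] args) throws IOException {
            Job job = new Job(new Configuration());
            FileInputFormat.setInputPaths(job, new Path("/tmp/input"));  // hypothetical input dir

            // Job doubles as the JobContext that both calls below expect.
            Path[] tablePaths = FileInputFormat.getInputPaths(job);
            List<InputSplit> splits = new TextInputFormat().getSplits(job);

            // Callers on the old array-based API can copy the list over.
            InputSplit[] results = splits.toArray(new InputSplit[splits.size()]);
            System.out.println(tablePaths.length + " paths, " + results.length + " splits");
        }
    }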

    writer.close();

    RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable> inputFormat = new RCFileMapReduceInputFormat<LongWritable, BytesRefArrayWritable>();
    Configuration jonconf = new Configuration(cloneConf);
    jonconf.set("mapred.input.dir", testDir.toString());
    JobContext context = new Job(jonconf);
    context.getConfiguration().setLong("mapred.max.split.size", maxSplitSize);
    List<InputSplit> splits = inputFormat.getSplits(context);
    assertEquals("splits length should be " + splitNumber, splitNumber, splits.size());
    int readCount = 0;
    for (int i = 0; i < splits.size(); i++) {
      TaskAttemptContext tac = ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptContext(jonconf, new TaskAttemptID());
View Full Code Here
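
The test wraps a Configuration in a Job (which is the JobContext that getSplits() needs), caps mapred.max.split.size to force a predictable split count, and then builds a TaskAttemptContext per split to read it back. The shim is only needed because TaskAttemptContext, like JobContext, became an interface in Hadoop 2; on Hadoop 1.x the context can be built directly, roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;

    public class TaskAttemptContextSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            conf.setLong("mapred.max.split.size", 1024L);  // the knob the test tunes (Hadoop 1.x key)

            // Hadoop 1.x: TaskAttemptContext is a concrete class; Hadoop 2.x code
            // goes through org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl,
            // which is what the HCatalog shim hides.
            TaskAttemptContext tac = new TaskAttemptContext(conf, new TaskAttemptID());
            System.out.println(tac.getTaskAttemptID());
        }
    }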

        for (FileStatus st : status) {
          LinkedHashMap<String, String> fullPartSpec = new LinkedHashMap<String, String>();
          Warehouse.makeSpecFromName(fullPartSpec, st.getPath());
          partitionsDiscoveredByPath.put(st.getPath().toString(), fullPartSpec);
          JobConf jobConf = (JobConf)context.getConfiguration();
          JobContext currContext = HCatMapRedUtil.createJobContext(
            jobConf,
            context.getJobID(),
            InternalUtil.createReporter(HCatMapRedUtil.createTaskAttemptContext(jobConf,
              ShimLoader.getHadoopShims().getHCatShim().createTaskAttemptID())));
          HCatOutputFormat.configureOutputStorageHandler(currContext, jobInfo, fullPartSpec);
View Full Code Here


  createRecordReader(InputSplit split,
             TaskAttemptContext taskContext) throws IOException, InterruptedException {

    HCatSplit hcatSplit = InternalUtil.castToHCatSplit(split);
    PartInfo partitionInfo = hcatSplit.getPartitionInfo();
    JobContext jobContext = taskContext;
    Configuration conf = jobContext.getConfiguration();

    HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(
      conf, partitionInfo);

    JobConf jobConf = HCatUtil.getJobConfFromContext(jobContext);
View Full Code Here
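
The assignment JobContext jobContext = taskContext is just a widening reference: a TaskAttemptContext is a JobContext, so helpers that only need job-level information (the configuration, the JobID) can accept the broader type. A small sketch of that pattern (the describe helper is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    public class ContextWideningSketch {
        // Hypothetical helper: it only needs job-level data, so it takes a JobContext.
        static String describe(JobContext jobContext) {
            Configuration conf = jobContext.getConfiguration();
            return jobContext.getJobID() + " -> " + conf.get("mapred.job.name", "<unset>");
        }

        static String describeFromTask(TaskAttemptContext taskContext) {
            // TaskAttemptContext extends JobContext (implements it in Hadoop 2.x),
            // so it can be passed wherever a JobContext is expected.
            return describe(taskContext);
        }
    }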


    TableMapReduceUtil.initCredentials(jc);

    String hbaseTableName = jc.get(HBaseSerDe.HBASE_TABLE_NAME);
    jc.set(TableOutputFormat.OUTPUT_TABLE, hbaseTableName);
    Job job = new Job(jc);
    JobContext jobContext = ShimLoader.getHadoopShims().newJobContext(job);

    try {
      checkOutputSpecs(jobContext);
    } catch (InterruptedException e) {
      throw new IOException(e);
View Full Code Here
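
Wrapping the JobConf in a new Job before asking the shim for a JobContext has a useful side effect: the Job constructor copies the configuration, so anything later written into the job's configuration does not flow back into the caller's jc. A minimal demonstration of that copy behavior (the property key is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobCopiesConfSketch {
        public static void main(String[] args) throws IOException {
            Configuration original = new Configuration();

            // new Job(conf) clones the configuration into the job's own JobConf,
            // so later writes through job.getConfiguration() stay inside the job.
            Job job = new Job(original);
            job.getConfiguration().set("example.output.table", "t1");  // hypothetical key

            System.out.println(original.get("example.output.table"));                // null
            System.out.println(job.getConfiguration().get("example.output.table"));  // t1
        }
    }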

        // The above setLocation call could write to the conf within
        // the job - get a hold of the modified conf
        conf = job.getConfiguration();
        inputFormat = wrappedLoadFunc.getInputFormat();
        try {
            inpSplits = inputFormat.getSplits(new JobContext(conf,
                    new JobID()));
        } catch (InterruptedException e) {
            throw new IOException(e);
        }       
    }
View Full Code Here
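
This snippet and the previous one share the same boilerplate: getSplits() and checkOutputSpecs() are declared on InputFormat/OutputFormat as throwing InterruptedException, but the surrounding code paths only propagate IOException, so the interruption is rewrapped. Condensed, the idiom looks like this (TextInputFormat stands in for the wrapped load function's input format):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputFormat;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

    public class WrapInterruptedSketch {
        static List<InputSplit> splitsOrIOException(JobContext ctx) throws IOException {
            // Call through the abstract InputFormat type, whose getSplits()
            // declares both IOException and InterruptedException.
            InputFormat<LongWritable, Text> format = new TextInputFormat();
            try {
                return format.getSplits(ctx);
            } catch (InterruptedException e) {
                // The caller only declares IOException, so rewrap the interruption.
                throw new IOException(e);
            }
        }
    }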
