Package org.apache.hadoop.mapreduce

Examples of org.apache.hadoop.mapreduce.TaskAttemptContext
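
TaskAttemptContext gives a running task access to its Configuration and its TaskAttemptID. In Hadoop 1.x it is a concrete class that can be constructed directly, as the first excerpt below does; in Hadoop 2.x it became an interface, which is why later excerpts obtain one through a ContextFactory or a Mockito mock. A minimal construction sketch, assuming Hadoop 2.x and its TaskAttemptContextImpl (the class name TaskAttemptContextDemo is just for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.TaskAttemptID;
    import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

    public class TaskAttemptContextDemo {
        public static void main(String[] args) {
            // An empty TaskAttemptID is enough for tests that only need a valid context object.
            Configuration conf = new Configuration();
            TaskAttemptContext tac = new TaskAttemptContextImpl(conf, new TaskAttemptID());
            System.out.println(tac.getTaskAttemptID());
        }
    }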


    //
    // Old Hadoop API
    //
    public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException
    {
        // Fabricate a task attempt context so the new-API getSplits(JobContext) can be reused.
        TaskAttemptContext tac = new TaskAttemptContext(jobConf, new TaskAttemptID());
        List<org.apache.hadoop.mapreduce.InputSplit> newInputSplits = this.getSplits(tac);
        org.apache.hadoop.mapred.InputSplit[] oldInputSplits = new org.apache.hadoop.mapred.InputSplit[newInputSplits.size()];
        for (int i = 0; i < newInputSplits.size(); i++)
            oldInputSplits[i] = (ColumnFamilySplit) newInputSplits.get(i);
        return oldInputSplits;
    }
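
The cast in the loop works because Cassandra's ColumnFamilySplit (presumably the split type this input format produces) extends the new-API org.apache.hadoop.mapreduce.InputSplit and also implements the old-API org.apache.hadoop.mapred.InputSplit interface, so a single split object can serve both APIs.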


      // validate input split
      InputSplit split = splits.get(i);
      Assert.assertTrue(split instanceof TableSnapshotRegionSplit);

      // validate record reader
      TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
      when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
      RecordReader<ImmutableBytesWritable, Result> rr = tsif.createRecordReader(split, taskAttemptContext);
      rr.initialize(split, taskAttemptContext);

      // validate we can read all the data back
      while (rr.nextKeyValue()) {
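The excerpt breaks off inside the read loop. A minimal completion sketch that drains and closes the reader; the per-row checks are an assumption, since the real test's assertions are not shown:

      int rows = 0;
      while (rr.nextKeyValue()) {
        ImmutableBytesWritable key = rr.getCurrentKey();
        Result value = rr.getCurrentValue();
        rows++;  // a real test would assert on the contents of key and value here
      }
      rr.close();
      Assert.assertTrue(rows > 0);  // hypothetical check: at least one row came back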

    JobContext job = ContextFactory.createJobContext();
    AccumuloRowInputFormat.setInputInfo(job.getConfiguration(), "root", "".getBytes(), "test", new Authorizations());
    AccumuloRowInputFormat.setMockInstance(job.getConfiguration(), "instance1");
    AccumuloRowInputFormat crif = new AccumuloRowInputFormat();
    RangeInputSplit ris = new RangeInputSplit();
    TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
    RecordReader<Text,PeekingIterator<Entry<Key,Value>>> rr = crif.createRecordReader(ris, tac);
    rr.initialize(ris, tac);
   
    assertTrue(rr.nextKeyValue());
    assertEquals(new Text("row1"), rr.getCurrentKey());
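With AccumuloRowInputFormat, each key is a row ID and each value is a PeekingIterator over that row's columns. A sketch of consuming the current value (the column handling is illustrative):

    PeekingIterator<Entry<Key,Value>> row = rr.getCurrentValue();
    while (row.hasNext()) {
      Entry<Key,Value> entry = row.next();
      // entry.getKey() carries the column family/qualifier; entry.getValue() is the cell value
    }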

    AccumuloOutputFormat output = new AccumuloOutputFormat();

    TestMapper mapper = (TestMapper) job.getMapperClass().newInstance();
    for (InputSplit split : splits) {
      TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
      RecordReader<Key,Value> reader = input.createRecordReader(split, tac);
      RecordWriter<Text,Mutation> writer = output.getRecordWriter(tac);
      Mapper<Key,Value,Text,Mutation>.Context context = ContextFactory.createMapContext(mapper, tac, reader, writer, split);
      reader.initialize(split, context);
      mapper.run(context);
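One detail the excerpt leaves out: after mapper.run(context) returns, the record writer still buffers mutations and must be closed to flush them, e.g.:

      writer.close(tac);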

   
    // When running a real MapReduce job, you won't need to instantiate the
    // output format and record writer yourself; the framework does that for
    // you, and your code simply calls output.collect(tableName, mutation).
    TaskAttemptContext context = ContextFactory.createTaskAttemptContext(job);
    RecordWriter<Text,Mutation> rw = new AccumuloOutputFormat().getRecordWriter(context);
   
    Text colf = new Text("colfam");
    System.out.println("writing ...");
    for (int i = 0; i < 10000; i++) {
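The truncated loop presumably builds one Mutation per iteration and hands it to the writer, keyed by destination table. A sketch under that assumption (row, column, and table names are illustrative):

    for (int i = 0; i < 10000; i++) {
      Mutation m = new Mutation(new Text(String.format("row_%06d", i)));
      m.put(colf, new Text("colqual"), new Value(Integer.toString(i).getBytes()));
      rw.write(new Text("testtable"), m);  // the key selects the table to write to
    }
    rw.close(context);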

      Assert.assertArrayEquals(new byte[0], risplit.getPassword());
      Assert.assertEquals("testtable", risplit.getTable());
      Assert.assertEquals(new Authorizations(), risplit.getAuths());
      Assert.assertEquals("testmapinstance", risplit.getInstanceName());

      TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
      RecordReader<Key,Value> reader = input.createRecordReader(split, tac);
      Mapper<Key,Value,Key,Value>.Context context = ContextFactory.createMapContext(mapper, tac, reader, null, split);
      reader.initialize(split, context);
      mapper.run(context);
    }

    JobContext job = ContextFactory.createJobContext();
    AccumuloInputFormat.setInputInfo(job.getConfiguration(), "root", "".getBytes(), "testtable2", new Authorizations());
    AccumuloInputFormat.setMockInstance(job.getConfiguration(), "testmapinstance");
    AccumuloInputFormat input = new AccumuloInputFormat();
    RangeInputSplit ris = new RangeInputSplit();
    TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
    RecordReader<Key,Value> rr = input.createRecordReader(ris, tac);
    rr.initialize(ris, tac);

    TestMapper mapper = new TestMapper();
    Mapper<Key,Value,Key,Value>.Context context = ContextFactory.createMapContext(mapper, tac, rr, null, ris);
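The excerpt presumably finishes by driving the mapper with the reader it just built, along the lines of:

    mapper.run(context);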

    AccumuloInputFormat.setMockInstance(job.getConfiguration(), "testmapinstance");
    final String regex = ".*1.*";
    AccumuloInputFormat.setRegex(job, org.apache.accumulo.core.client.mapreduce.InputFormatBase.RegexType.ROW, regex);
    AccumuloInputFormat input = new AccumuloInputFormat();
    RangeInputSplit ris = new RangeInputSplit();
    TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
    RecordReader<Key,Value> rr = input.createRecordReader(ris, tac);
    rr.initialize(ris, tac);

    Pattern p = Pattern.compile(regex);
    while (rr.nextKeyValue()) {
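A plausible body for the truncated loop checks every row the reader returns against the compiled pattern:

    while (rr.nextKeyValue()) {
      Key key = rr.getCurrentKey();
      assertTrue(p.matcher(key.getRow().toString()).matches());
    }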

    TestMapper mapper = (TestMapper) job.getMapperClass().newInstance();

    RangeInputSplit emptySplit = new RangeInputSplit();

    // Using an empty split should fall back to the information in the Job's Configuration
    TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
    RecordReader<Key,Value> reader = input.createRecordReader(emptySplit, tac);
    Mapper<Key,Value,Key,Value>.Context context = ContextFactory.createMapContext(mapper, tac, reader, null, emptySplit);
    reader.initialize(emptySplit, context);
    mapper.run(context);
  }

    RangeInputSplit emptySplit = new RangeInputSplit();
    emptySplit.setUsername("root");
    emptySplit.setPassword("anythingelse".getBytes());

    // Fields set on the split (here, the credentials) take precedence;
    // anything left unset falls back to the information in the Job's Configuration
    TaskAttemptContext tac = ContextFactory.createTaskAttemptContext(job);
    RecordReader<Key,Value> reader = input.createRecordReader(emptySplit, tac);
    Mapper<Key,Value,Key,Value>.Context context = ContextFactory.createMapContext(mapper, tac, reader, null, emptySplit);
    reader.initialize(emptySplit, context);
    mapper.run(context);
  }
