Package: org.apache.accumulo.examples.wikisearch.reader

Examples of org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader
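
The fragments below appear to come from the wikisearch example's input format and its unit tests, and they assume a Hadoop Configuration (conf) and TaskAttemptContext (ctx) set up elsewhere. As a minimal sketch of how the reader is typically driven (the class name AggregatingRecordReaderSketch, the helper method names, and the "<page>"/"</page>" token values are illustrative assumptions, not taken from the fragments): set the START_TOKEN and END_TOKEN properties to the tags that delimit one aggregated record, then use the reader like any other Hadoop RecordReader.

import org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class AggregatingRecordReaderSketch {

  // Tell the reader which begin/end tags delimit one aggregated record.
  // The "<page>"/"</page>" values here are illustrative only.
  static void configureTokens(Configuration conf) {
    conf.set(AggregatingRecordReader.START_TOKEN, "<page>");
    conf.set(AggregatingRecordReader.END_TOKEN, "</page>");
  }

  // Drain one split: each value returned is a complete start..end fragment.
  static void readAll(InputSplit split, TaskAttemptContext ctx) throws Exception {
    AggregatingRecordReader reader = new AggregatingRecordReader();
    try {
      reader.initialize(split, ctx);
      while (reader.nextKeyValue()) {
        Text record = reader.getCurrentValue();
        System.out.println(record);
      }
    } finally {
      reader.close();
    }
  }
}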


    return splits;
  }

  @Override
  public RecordReader<LongWritable,Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
    return new AggregatingRecordReader();
  }
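
For context, the fragment above is the tail of the input format that wires the reader in. A minimal self-contained version might look like the following; the class name AggregatingXmlInputFormat is hypothetical, and it simply reuses TextInputFormat's split logic rather than the custom getSplits() shown above.

import org.apache.accumulo.examples.wikisearch.reader.AggregatingRecordReader;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

// Hypothetical wrapper: keep TextInputFormat's getSplits(), but hand every
// split to an AggregatingRecordReader instead of a line-oriented reader.
public class AggregatingXmlInputFormat extends TextInputFormat {
  @Override
  public RecordReader<LongWritable,Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
    return new AggregatingRecordReader();
  }
}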


    File f = createFile(xml1);
   
    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
    AggregatingRecordReader reader = new AggregatingRecordReader();
    try {
      // Clear the values for the START and END tokens
      conf.set(AggregatingRecordReader.START_TOKEN, null);
      conf.set(AggregatingRecordReader.END_TOKEN, null);
      reader.initialize(split, ctx);
      // If we got here, then the code didn't throw an exception
      fail();
    } catch (Exception e) {
      // Do nothing, we succeeded
      f = null;
    }
    reader.close();
  }

    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
   
    // Initialize the RecordReader
    AggregatingRecordReader reader = new AggregatingRecordReader();
    reader.initialize(split, ctx);
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "A", "B", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "C", "D", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "E", "F", "");
    assertTrue(!reader.nextKeyValue());
   
  }

    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
   
    // Initialize the RecordReader
    AggregatingRecordReader reader = new AggregatingRecordReader();
    reader.initialize(split, ctx);
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "C", "D", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "E", "F", "");
    assertTrue(!reader.nextKeyValue());
  }

    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
   
    // Initialize the RecordReader
    AggregatingRecordReader reader = new AggregatingRecordReader();
    reader.initialize(split, ctx);
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "A", "B", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "C", "D", "");
    assertTrue(!reader.nextKeyValue());
  }

    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
   
    // Initialize the RecordReader
    AggregatingRecordReader reader = new AggregatingRecordReader();
    reader.initialize(split, ctx);
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "A", "B", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "C", "D", "");
    assertTrue(reader.nextKeyValue());
    try {
      testXML(reader.getCurrentValue(), "E", "", "");
      fail("Fragment returned, and it somehow passed XML parsing.");
    } catch (SAXParseException e) {
      // expected: the trailing fragment is not well-formed XML
    }
    assertTrue(!reader.nextKeyValue());
  }

    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
   
    // Initialize the RecordReader
    AggregatingRecordReader reader = new AggregatingRecordReader();
    reader.initialize(split, ctx);
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "A", "B", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "C", "D", "");
    assertTrue(reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "E", "F", "");
    assertTrue(!reader.nextKeyValue());
  }

    // Create FileSplit
    Path p = new Path(f.toURI().toString());
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(p, 0, f.length(), null),0);
   
    // Initialize the RecordReader
    AggregatingRecordReader reader = new AggregatingRecordReader();
    reader.initialize(split, ctx);
    assertTrue("Not enough records returned.", reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "A", "B", "G");
    assertTrue("Not enough records returned.", reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "C", "D", "");
    assertTrue("Not enough records returned.", reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "", "", "H");
    assertTrue("Not enough records returned.", reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "E", "F", "");
    assertTrue("Not enough records returned.", reader.nextKeyValue());
    testXML(reader.getCurrentValue(), "", "", "I");
    assertTrue("Too many records returned.", !reader.nextKeyValue());
  }

    File data = new File(url.toURI());
    Path tmpFile = new Path(data.getAbsolutePath());
   
    // Setup the Mapper
    WikipediaInputSplit split = new WikipediaInputSplit(new FileSplit(tmpFile, 0, fs.pathToFile(tmpFile).length(), null),0);
    AggregatingRecordReader rr = new AggregatingRecordReader();
    Path ocPath = new Path(tmpFile, "oc");
    OutputCommitter oc = new FileOutputCommitter(ocPath, context);
    fs.deleteOnExit(ocPath);
    StandaloneStatusReporter sr = new StandaloneStatusReporter();
    rr.initialize(split, context);
    MockAccumuloRecordWriter rw = new MockAccumuloRecordWriter();
    WikipediaMapper mapper = new WikipediaMapper();
   
    // Load data into Mock Accumulo
    Mapper<LongWritable,Text,Text,Mutation>.Context con = mapper.new Context(conf, id, rr, rw, oc, sr, split);

    File data = new File(url.toURI());
    Path tmpFile = new Path(data.getAbsolutePath());
   
    // Setup the Mapper
    InputSplit split = new FileSplit(tmpFile, 0, fs.pathToFile(tmpFile).length(), null);
    AggregatingRecordReader rr = new AggregatingRecordReader();
    Path ocPath = new Path(tmpFile, "oc");
    OutputCommitter oc = new FileOutputCommitter(ocPath, context);
    fs.deleteOnExit(ocPath);
    StandaloneStatusReporter sr = new StandaloneStatusReporter();
    rr.initialize(split, context);
    MockAccumuloRecordWriter rw = new MockAccumuloRecordWriter();
    WikipediaMapper mapper = new WikipediaMapper();
   
    // Load data into Mock Accumulo
    Mapper<LongWritable,Text,Text,Mutation>.Context con = mapper.new Context(conf, id, rr, rw, oc, sr, split);
