Package org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.WhitespaceTokenizer
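
Before the collected snippets, a minimal, self-contained sketch of what the class does: WhitespaceTokenizer divides incoming character data at whitespace and emits each run of non-whitespace characters as one token. The sketch assumes the Lucene 3.1-era API (Version-taking constructor, CharTermAttribute); several snippets below use the older Reader-only constructor instead.

    import java.io.StringReader;

    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class WhitespaceTokenizerDemo {
      public static void main(String[] args) throws Exception {
        // Tokenize a short string; any whitespace (spaces, tabs) is a separator.
        WhitespaceTokenizer ts =
            new WhitespaceTokenizer(Version.LUCENE_31, new StringReader("one two\tthree"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
          System.out.println(term.toString());   // prints "one", "two", "three"
        }
        ts.end();
        ts.close();
      }
    }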


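From an IndexWriter test that indexes pre-analyzed fields (older, pre-3.1 API: the tokenizer takes only a Reader). Field.setTokenStream supplies the WhitespaceTokenizer output as the tokens to index, independent of the stored field value; `w` is an IndexWriter created in the elided part of the test.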
    // Fill a byte[] with arbitrary non-zero bytes for the binary field below
    // (the declaration is elided in the excerpt; a 50-byte array fits the loop).
    byte[] b = new byte[50];
    for(int i=0;i<50;i++)
      b[i] = (byte) (i+77);

    Document doc = new Document();
    Field f = new Field("binary", b, 10, 17, Field.Store.YES);
    f.setTokenStream(new WhitespaceTokenizer(new StringReader("doc1field1")));
    Field f2 = new Field("string", "value", Field.Store.YES, Field.Index.ANALYZED);
    f2.setTokenStream(new WhitespaceTokenizer(new StringReader("doc1field2")));
    doc.add(f);
    doc.add(f2);
    w.addDocument(doc);
   
    // add 2 docs to test in-memory merging
    f.setTokenStream(new WhitespaceTokenizer(new StringReader("doc2field1")));
    f2.setTokenStream(new WhitespaceTokenizer(new StringReader("doc2field2")));
    w.addDocument(doc);
 
    // force segment flush so we can force a segment merge with doc3 later.
    w.commit();

    f.setTokenStream(new WhitespaceTokenizer(new StringReader("doc3field1")));
    f2.setTokenStream(new WhitespaceTokenizer(new StringReader("doc3field2")));

    w.addDocument(doc);
    w.commit();
    w.optimize();   // force segment merge.


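From a payloads test: an Analyzer whose tokenStream method wraps WhitespaceTokenizer in a PayloadFilter for any field registered in the fieldToData map, skipping the first numFieldInstancesToSkip instances of that field.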
            fieldToData.put(field, new PayloadData(numFieldInstancesToSkip, data, offset, length));
        }
       
        public TokenStream tokenStream(String fieldName, Reader reader) {
            PayloadData payload = (PayloadData) fieldToData.get(fieldName);
            TokenStream ts = new WhitespaceTokenizer(reader);
            if (payload != null) {
                if (payload.numFieldInstancesToSkip == 0) {
                    ts = new PayloadFilter(ts, payload.data, payload.offset, payload.length);
                } else {
                    payload.numFieldInstancesToSkip--;
                }
            }
            return ts;
        }
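
The snippet above references a PayloadData holder that the excerpt elides. A minimal reconstruction, with field names taken from the usages above; the constructor shape is an assumption:

        // Hypothetical holder for per-field payload bytes; names from the snippet.
        static class PayloadData {
            int numFieldInstancesToSkip;
            byte[] data;
            int offset;
            int length;

            PayloadData(int numFieldInstancesToSkip, byte[] data, int offset, int length) {
                this.numFieldInstancesToSkip = numFieldInstancesToSkip;
                this.data = data;
                this.offset = offset;
                this.length = length;
            }
        }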

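From the ICU analysis tests: WhitespaceTokenizer feeding an ICUNormalizer2Filter. As the inline comment says, requesting the "nfc" normalizer in DECOMPOSE mode yields NFD normalization.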
  public void testAlternate() throws IOException {
    Analyzer a = new Analyzer() {
      @Override
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new ICUNormalizer2Filter(
            new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader),
            /* specify nfc with decompose to get nfd */
            Normalizer2.getInstance(null, "nfc", Normalizer2.Mode.DECOMPOSE));
      }
    };
   

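Also from the ICU tests, using the newer ReusableAnalyzerBase/TokenStreamComponents pattern: WhitespaceTokenizer is the source tokenizer for an ICUTransformFilter that applies an Any-Latin transliteration, checked over random data.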
  public void testRandomStrings() throws Exception {
    final Transliterator transform = Transliterator.getInstance("Any-Latin");
    Analyzer a = new ReusableAnalyzerBase() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
        return new TokenStreamComponents(tokenizer, new ICUTransformFilter(tokenizer, transform));
      }
    };
    checkRandomData(random, a, 1000*RANDOM_MULTIPLIER);
  }

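The same pre-analyzed-fields test as the first snippet, ported to the 3.x test API: the tokenizer constructor now takes TEST_VERSION_CURRENT, and newField is the LuceneTestCase helper.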
    // As above, the byte[] declaration is elided in the excerpt.
    byte[] b = new byte[50];
    for(int i=0;i<50;i++)
      b[i] = (byte) (i+77);

    Document doc = new Document();
    Field f = new Field("binary", b, 10, 17);
    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1")));
    Field f2 = newField("string", "value", Field.Store.YES, Field.Index.ANALYZED);
    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2")));
    doc.add(f);
    doc.add(f2);
    w.addDocument(doc);
   
    // add 2 docs to test in-memory merging
    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field1")));
    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc2field2")));
    w.addDocument(doc);
 
    // force segment flush so we can force a segment merge with doc3 later.
    w.commit();

    f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field1")));
    f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc3field2")));

    w.addDocument(doc);
    w.commit();
    w.optimize();   // force segment merge.
    w.close();

  }

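From an IndexWriter exception-handling test: the analyzer chains WhitespaceTokenizer into a test-local CrashingFilter (sketched below) so that analysis can be made to fail partway through a document.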
  public void testDocumentsWriterExceptions() throws IOException {
    Analyzer analyzer = new Analyzer() {
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new CrashingFilter(fieldName, new WhitespaceTokenizer(reader));
      }
    };

    for(int i=0;i<2;i++) {
      MockRAMDirectory dir = new MockRAMDirectory();

  }
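
Both this test and the threaded variant below depend on the test-local CrashingFilter that the excerpts elide. A minimal sketch assuming the 3.x TokenFilter API; the trigger condition (a field named "crash", failing after a few tokens) is an assumption, not the verbatim original:

    import java.io.IOException;

    import org.apache.lucene.analysis.TokenFilter;
    import org.apache.lucene.analysis.TokenStream;

    // Hypothetical reconstruction: throws partway through a designated field so
    // the tests can verify that indexing cleans up after analysis exceptions.
    class CrashingFilter extends TokenFilter {
      private final String fieldName;
      private int count;

      CrashingFilter(String fieldName, TokenStream input) {
        super(input);
        this.fieldName = fieldName;
      }

      @Override
      public boolean incrementToken() throws IOException {
        if ("crash".equals(fieldName) && count++ >= 4)
          throw new IOException("now failing on purpose");
        return input.incrementToken();
      }
    }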

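The multi-threaded variant of the same test: NUM_THREAD threads each run NUM_ITER indexing iterations through the crashing analyzer.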
  public void testDocumentsWriterExceptionThreads() throws IOException {
    Analyzer analyzer = new Analyzer() {
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new CrashingFilter(fieldName, new WhitespaceTokenizer(reader));
      }
    };

    final int NUM_THREAD = 3;
    final int NUM_ITER = 100;

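From NGramTokenFilterTest: setUp builds the shared input stream as a WhitespaceTokenizer over the single token "abcde".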
public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
    private TokenStream input;
   
    public void setUp() throws Exception {
        super.setUp();
        input = new WhitespaceTokenizer(new StringReader("abcde"));
    }

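Boundary cases: a 6..7-gram filter over the 5-character token "abcde" emits nothing, and with 3-grams over "abc de fgh" the 2-character token "de" is too short to contribute, leaving only "abc" and "fgh".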
      NGramTokenFilter filter = new NGramTokenFilter(input, 6, 7);
      assertTokenStreamContents(filter, new String[0], new int[0], new int[0]);
    }
   
    public void testSmallTokenInStream() throws Exception {
      input = new WhitespaceTokenizer(new StringReader("abc de fgh"));
      NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
      assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,0}, new int[]{3,3});
    }

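testReset runs the 1-gram filter chain, then calls tokenizer.reset(Reader) with a fresh reader and asserts identical output, verifying that the tokenizer/filter pair can be reused across inputs.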
      NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
      assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,0}, new int[]{3,3});
    }
   
    public void testReset() throws Exception {
      WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(new StringReader("abcde"));
      NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
      assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
      tokenizer.reset(new StringReader("abcde"));
      assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
    }
