Package org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.CannedTokenStream
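
CannedTokenStream is a test-support TokenStream that simply replays a fixed array of pre-built Token instances, which makes it easy to feed an exact sequence of terms, position increments, and offsets to the indexer without writing a real analyzer. Several snippets below call a token(...) helper defined in the enclosing test class; a minimal sketch of what such helpers typically look like (hypothetical; the exact signatures in each test may differ):

    import org.apache.lucene.analysis.Token;

    // Hypothetical helper matching calls like token("p", 1, 1):
    // term text, position increment, position length.
    static Token token(String term, int posInc, int posLength) {
      Token t = new Token();
      t.append(term);
      t.setPositionIncrement(posInc);
      t.setPositionLength(posLength);
      return t;
    }

    // Hypothetical overload matching calls like token("der", 1, 0, 3):
    // term text, position increment, start offset, end offset.
    static Token token(String term, int posInc, int startOffset, int endOffset) {
      Token t = new Token(term, startOffset, endOffset);
      t.setPositionIncrement(posInc);
      return t;
    }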


          @Override
          public TokenStream getTokenStream() {
            if (count == 0) {
              // First call: "r" and "s" have posInc=0, so they are stacked
              // on the same position as "q".
              count++;
              return new CannedTokenStream(new Token[] {
                  token("p", 1, 1),
                  token("q", 1, 1),
                  token("r", 0, 1),
                  token("s", 0, 1),
                });
            } else {
              // Subsequent calls: a single-token stream.
              return new CannedTokenStream(new Token[] {
                  token("p", 1, 1),
                });
            }
          }
        


            public TokenStream getTokenStream() {
              Token a = new Token("a", 0, 1);
              a.setPositionIncrement(1);
              Token b = new Token("b", 0, 1);
              b.setPositionIncrement(0);
              return new CannedTokenStream(new Token[] {a, b});
            }
        
            @Override
            protected void setReader(final Reader reader) throws IOException {
            }
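In the snippet above, giving "b" a position increment of 0 stacks it on the same position as "a", the way a synonym filter would. A canned stream can be consumed directly through the standard attribute API to verify what it emits (a minimal sketch; the attribute interfaces live in org.apache.lucene.analysis.tokenattributes):

    TokenStream ts = new CannedTokenStream(a, b);
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    PositionIncrementAttribute posIncr = ts.addAttribute(PositionIncrementAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      // prints "a posIncr=1" then "b posIncr=0"
      System.out.println(term + " posIncr=" + posIncr.getPositionIncrement());
    }
    ts.end();
    ts.close();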

  }
 
  // LUCENE-4880
  public void testEmptyString() throws IOException {
    MemoryIndex memory = new MemoryIndex();
    memory.addField("foo", new CannedTokenStream(new Token("", 0, 5)));
    IndexSearcher searcher = memory.createSearcher();
    TopDocs docs = searcher.search(new TermQuery(new Term("foo", "")), 10);
    assertEquals(1, docs.totalHits);
  }

    tokens[2].append("c");
    tokens[2].setPositionIncrement(0);

    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new TextField("field", new CannedTokenStream(tokens)));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new TextField("field", new CannedTokenStream(tokens)));
    writer.addDocument(doc);
    IndexReader r = writer.getReader();
    writer.close();
    IndexSearcher s = newSearcher(r);
    MultiPhraseQuery mpq = new MultiPhraseQuery();
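The test above is cut off right after the MultiPhraseQuery is created. In general, MultiPhraseQuery is how stacked tokens like these are queried: it accepts several alternative terms at a single phrase position. A generic sketch, not the actual continuation of the truncated test:

    MultiPhraseQuery mpq = new MultiPhraseQuery();
    mpq.add(new Term("field", "a"));                                        // position 0: "a"
    mpq.add(new Term[] { new Term("field", "b"), new Term("field", "c") }); // position 1: "b" or "c"
    TopDocs hits = s.search(mpq, 10);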

  private void doTestZeroPosIncrSloppy(Query q, int nExpected) throws IOException {
    Directory dir = newDirectory(); // random dir
    IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
    IndexWriter writer = new IndexWriter(dir, cfg);
    Document doc = new Document();
    doc.add(new TextField("field", new CannedTokenStream(INCR_0_DOC_TOKENS)));
    writer.addDocument(doc);
    IndexReader r = DirectoryReader.open(writer, false);
    writer.close();
    IndexSearcher s = newSearcher(r);
   

    doc.add( new Field( "field_super_exact", fieldValue, matched ) )// Whitespace tokenized without toLower
    doc.add( new Field( "field_characters", fieldValue, matched ) );   // Each letter is a token
    doc.add( new Field( "field_tripples", fieldValue, matched ) );     // Every three letters is a token
    doc.add( new Field( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
      Math.min( fieldValue.length() - 1 , 10 ) ), matched ) );
    doc.add( new Field( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
          token( "der", 1, 0, 3 ),
          token( "red", 0, 0, 3 )
        ), matched ) );

    final Map<String, Analyzer> fieldAnalyzers = new TreeMap<String, Analyzer>();

    Document doc = new Document();
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorOffsets(true);
    Field field = new Field("foo", "", ft);
    field.setTokenStream(new CannedTokenStream(
        new Token("bar", 5, 10), new Token("bar", 1, 4)
    ));
    doc.add(field);
    iw.addDocument(doc);
    iw.close();

      makeToken("a", 1, 0, 6),
      makeToken("b", 1, 8, 9),
      makeToken("a", 1, 9, 17),
      makeToken("c", 1, 19, 50),
    };
    doc.add(new Field("content", new CannedTokenStream(tokens), ft));

    w.addDocument(doc);
    IndexReader r = w.getReader();
    w.close();

        // stuff abs position into type:
        token.setType(""+pos);
        offset += offIncr + tokenOffset;
        //System.out.println("  " + token + " posIncr=" + token.getPositionIncrement() + " pos=" + pos + " off=" + token.startOffset() + "/" + token.endOffset() + " (freq=" + postingsByDoc.get(docCount).size() + ")");
      }
      doc.add(new Field("content", new CannedTokenStream(tokens.toArray(new Token[tokens.size()])), ft));
      w.addDocument(doc);
    }
    final DirectoryReader r = w.getReader();
    w.close();

    Token t1 = new Token("foo", 0, Integer.MAX_VALUE-500);
    if (random().nextBoolean()) {
      t1.setPayload(new BytesRef("test"));
    }
    Token t2 = new Token("foo", Integer.MAX_VALUE-500, Integer.MAX_VALUE);
    TokenStream tokenStream = new CannedTokenStream(
        new Token[] { t1, t2 }
    );
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    // store some term vectors for the checkindex cross-check
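This last snippet is also truncated. A canned stream like this is typically attached to a document through the TokenStream-based Field constructor; a sketch of the likely continuation, with the writer variable and field name as assumptions:

    ft.setStoreTermVectors(true);                    // per the comment above
    Field field = new Field("foo", tokenStream, ft); // field name "foo" is an assumption
    Document doc = new Document();
    doc.add(field);
    writer.addDocument(doc);                         // "writer" is assumed to be an open IndexWriter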
