Package org.apache.lucene.analysis.standard

Examples of org.apache.lucene.analysis.standard.StandardFilter


  // Reusable chain: StandardTokenizer -> StandardFilter -> StopFilter
  // -> FrenchStemFilter -> LowerCaseFilter, cached via SavedStreams.
  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new StandardTokenizer(reader);
      streams.result = new StandardFilter(streams.source);
      streams.result = new StopFilter(streams.result, stoptable);
      streams.result = new FrenchStemFilter(streams.result, excltable);
      // Convert to lowercase after stemming!
      streams.result = new LowerCaseFilter(streams.result);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }
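
These reusableTokenStream implementations cache the analysis chain in a SavedStreams holder, stored per thread through setPreviousTokenStream/getPreviousTokenStream. A minimal sketch of that holder, using the field names from the snippets (in the real analyzers it is a private inner class):

  private static final class SavedStreams {
    Tokenizer source;    // the StandardTokenizer at the head of the chain
    TokenStream result;  // the outermost filter wrapped around it
  }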


   * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
   *       {@link StandardFilter}, {@link LowerCaseFilter}, and {@link StopFilter}
   */
  public final TokenStream tokenStream( String fieldName, Reader reader ) {
    TokenStream result = new StandardTokenizer( reader );
    result = new StandardFilter( result );
    result = new LowerCaseFilter( result );
    result = new StopFilter( result, stoptable );
    return result;
  }
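
However the chain is assembled, the resulting TokenStream is consumed the same way. A minimal sketch using the attribute-based API (CharTermAttribute needs Lucene 3.1 or later; earlier releases use TermAttribute instead), with the analyzer instance and field name as placeholders:

  TokenStream ts = analyzer.tokenStream("contents", new StringReader("some sample text"));
  CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
  ts.reset();
  while (ts.incrementToken()) {
    System.out.println(term.toString());   // one output line per token
  }
  ts.end();
  ts.close();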

  // Reusable variant of the chain above: StandardTokenizer -> StandardFilter
  // -> LowerCaseFilter -> StopFilter.
  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new StandardTokenizer(reader);
      streams.result = new StandardFilter(streams.source);
      streams.result = new LowerCaseFilter(streams.result);
      streams.result = new StopFilter(streams.result, stoptable);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }

   * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
   *          {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter}, and
   *          {@link GermanStemFilter}
   */
  public TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new StandardTokenizer(reader);
    result = new StandardFilter(result);
    result = new LowerCaseFilter(result);
    result = new StopFilter(result, stopSet);
    result = new GermanStemFilter(result, exclusionSet);
    return result;
  }
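
At indexing time the analyzer is handed to the IndexWriter rather than invoked directly. A minimal sketch of that wiring, assuming Lucene 3.1+ (IndexWriterConfig) and treating the index path and the analyzer variable as placeholders:

  Directory dir = FSDirectory.open(new File("/path/to/index"));
  IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_31, analyzer);
  IndexWriter writer = new IndexWriter(dir, config);
  Document doc = new Document();
  doc.add(new Field("contents", "Der schnelle braune Fuchs",
      Field.Store.YES, Field.Index.ANALYZED));
  writer.addDocument(doc);   // the field text runs through the analyzer here
  writer.close();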

   
  // Reusable variant of the German chain above, including the stem filter.
  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new StandardTokenizer(reader);
      streams.result = new StandardFilter(streams.source);
      streams.result = new LowerCaseFilter(streams.result);
      streams.result = new StopFilter(streams.result, stopSet);
      streams.result = new GermanStemFilter(streams.result, exclusionSet);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }

  public ThaiAnalyzer() {
    setOverridesTokenStreamMethod(ThaiAnalyzer.class);
  }
 
  public TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream ts = new StandardTokenizer(reader);
    ts = new StandardFilter(ts);
    ts = new ThaiWordFilter(ts);
    ts = new StopFilter(ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
    return ts;
  }
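
The same analyzer should also be used at query time so that search terms pass through the identical chain. A minimal sketch with the classic QueryParser, assuming an already-constructed ThaiAnalyzer instance named analyzer (constructor arguments vary across Lucene versions):

  // QueryParser.parse(...) throws ParseException.
  QueryParser parser = new QueryParser(Version.LUCENE_30, "contents", analyzer);
  Query query = parser.parse("query text here");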

   
  // Reusable variant of the Thai chain above.
  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new StandardTokenizer(reader);
      streams.result = new StandardFilter(streams.source);
      streams.result = new ThaiWordFilter(streams.result);
      streams.result = new StopFilter(streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }

   * @return  A {@link TokenStream} built from a {@link StandardTokenizer} filtered with
   *          {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter}, and
   *          {@link BrazilianStemFilter}.
   */
  public final TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new StandardTokenizer( reader );
    result = new LowerCaseFilter( result );
    result = new StandardFilter( result );
    result = new StopFilter( result, stoptable );
    result = new BrazilianStemFilter( result, excltable );
    return result;
  }
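
The excltable handed to BrazilianStemFilter is a stem-exclusion set: any term found in it bypasses stemming. How it is filled depends on the analyzer; as an assumption, several Lucene 3.x contrib analyzers expose a setStemExclusionTable setter for this purpose, roughly:

  // Sketch only: constructor and setter signatures vary by Lucene version.
  BrazilianAnalyzer analyzer = new BrazilianAnalyzer(Version.LUCENE_30);
  analyzer.setStemExclusionTable(new String[] { "quero", "quiser" });  // keep these un-stemmed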

  // Reusable variant of the Brazilian chain above; note that LowerCaseFilter
  // runs before StandardFilter in this analyzer.
  public TokenStream reusableTokenStream(String fieldName, Reader reader)
      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      streams.source = new StandardTokenizer(reader);
      streams.result = new LowerCaseFilter(streams.source);
      streams.result = new StandardFilter(streams.result);
      streams.result = new StopFilter(streams.result, stoptable);
      streams.result = new BrazilianStemFilter(streams.result, excltable);
      setPreviousTokenStream(streams);
    } else {
      streams.source.reset(reader);
    }
    return streams.result;
  }

            tok = new LowerCaseFilter(matchVersion, tok);
            return new TokenStreamComponents(src, tok);
        } else {
            final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
            src.setMaxTokenLength(maxTokenLength);
            TokenStream tok = new StandardFilter(matchVersion, src);
            // prior to this we get the classic behavior, standardfilter does it for us.
            tok = new SKOSLabelFilter(tok, skosEngine, new StandardAnalyzer(
                    matchVersion), bufferSize, types);
            tok = new LowerCaseFilter(matchVersion, tok);
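
This last excerpt uses the newer Analyzer API: createComponents returns a TokenStreamComponents pair (the tokenizer plus the outermost filter) and the framework handles reuse, replacing the SavedStreams pattern shown earlier. A minimal skeleton of that shape without the SKOS-specific pieces, assuming matchVersion, maxTokenLength, and stopwords are fields of the enclosing analyzer:

  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    final StandardTokenizer src = new StandardTokenizer(matchVersion, reader);
    src.setMaxTokenLength(maxTokenLength);
    TokenStream tok = new StandardFilter(matchVersion, src);
    tok = new LowerCaseFilter(matchVersion, tok);
    tok = new StopFilter(matchVersion, tok, stopwords);
    return new TokenStreamComponents(src, tok);
  }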
