Package org.apache.lucene.analysis.ngram

Examples of org.apache.lucene.analysis.ngram.NGramTokenizer
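
The fragments below show NGramTokenizer being wired into tokenizer factories, analyzers, and tests. For orientation, here is a minimal, self-contained sketch of driving the tokenizer directly. It assumes the Reader-based constructor NGramTokenizer(Reader, int, int) used throughout these fragments (newer Lucene releases take only the gram sizes and receive the Reader via setReader()); the NGramDemo class name is purely illustrative.

import java.io.StringReader;

import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class NGramDemo {
    public static void main(String[] args) throws Exception {
        // Emit every 1- and 2-character gram of "ngram".
        NGramTokenizer tokenizer = new NGramTokenizer(new StringReader("ngram"), 1, 2);
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            System.out.println(term.toString());
        }
        tokenizer.end();
        tokenizer.close();
    }
}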


        // Fall back to NGramTokenizer's default when no minimum gram size is configured.
        minGramSize = (minArg != null ? Integer.parseInt(minArg) : NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
    }

    /** Creates the {@link TokenStream} of n-grams from the given {@link Reader}. */
    public NGramTokenizer create(Reader input) {
        return new NGramTokenizer(input, minGramSize, maxGramSize);
    }

            @Override public String name() {
                return "nGram";
            }

            @Override public Tokenizer create(Reader reader) {
                return new NGramTokenizer(reader);
            }
        }));

        tokenizerFactories.put("ngram", new PreBuiltTokenizerFactoryFactory(new TokenizerFactory() {
            @Override public String name() {
                return "ngram";
            }

            @Override public Tokenizer create(Reader reader) {
                return new NGramTokenizer(reader);
            }
        }));

        tokenizerFactories.put("edgeNGram", new PreBuiltTokenizerFactoryFactory(new TokenizerFactory() {
            @Override public String name() {

        // Read min_gram / max_gram from the index settings, falling back to the tokenizer's defaults.
        this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
        this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
    }

    @Override public Tokenizer create(Reader reader) {
        return new NGramTokenizer(reader, minGram, maxGram);
    }

  }
 
  /** Creates the {@link TokenStream} of n-grams from the given {@link Reader}. */
  @Override
  public NGramTokenizer create(Reader input) {
    return new NGramTokenizer(input, minGramSize, maxGramSize);
  }

    // NFKC-casefold the input through an ICU CharFilter, then emit single-character
    // grams; the expected offsets point back into the original, un-normalized text.
    String input = "㌰゙5℃№㈱㌘ザゾ";

    CharFilter reader = new ICUNormalizer2CharFilter(new StringReader(input),
      Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));

    Tokenizer tokenStream = new NGramTokenizer(TEST_VERSION_CURRENT, newAttributeFactory(), reader, 1, 1);

    assertTokenStreamContents(tokenStream,
      new String[] {"ピ", "ゴ", "5", "°", "c", "n", "o", "(", "株", ")", "グ", "ラ", "ム", "ザ", "ゾ"},
      new int[]{0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 9},
      new int[]{1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 9, 11},

  /**
   * Creates the {@link TokenStream} of n-grams from the given {@link Reader}.
   */
  public NGramTokenizer create(Reader input) {
    return new NGramTokenizer(input, minGramSize, maxGramSize);
  }

    // Same normalization check as above, this time constructing the tokenizer
    // without an explicit AttributeFactory.
    String input = "㌰゙5℃№㈱㌘ザゾ";

    CharFilter reader = new ICUNormalizer2CharFilter(new StringReader(input),
      Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));

    Tokenizer tokenStream = new NGramTokenizer(TEST_VERSION_CURRENT, reader, 1, 1);

    assertTokenStreamContents(tokenStream,
      new String[] {"ピ", "ゴ", "5", "°", "c", "n", "o", "(", "株", ")", "グ", "ラ", "ム", "ザ", "ゾ"},
      new int[]{0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 9},
      new int[]{1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 9, 11},

    @SuppressWarnings("resource") // caller should close
    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
        TokenStream tokenStream;
        NGramTokenizer ngramTokenizer =
                new NGramTokenizer(reader, ngramMinLength, ngramMaxLength);
        if (foldCase) {
            tokenStream = new ULowerCaseFilter(ngramTokenizer);
        } else {
            tokenStream = ngramTokenizer;
        }

        this.maxGram = maxGram;
    }

    @Override
    public TokenStream tokenStream(String fieldName, Reader reader) {
        TokenStream s = new NGramTokenizer(reader, minGram, maxGram);
        // PositionFilter (with its default settings) sets the position increment of every
        // gram after the first to 0, so all grams of a value stack at the same position.
        s = new PositionFilter(s);
        return s;
    }
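
The fragment above chains NGramTokenizer into PositionFilter so that all grams of a value share a single position. A minimal standalone sketch of the same chain, assuming the Reader-based NGramTokenizer constructor and the PositionFilter from Lucene's analysis modules as used in the fragment; the NGramPositionDemo class name is just for illustration.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.position.PositionFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

public class NGramPositionDemo {
    public static void main(String[] args) throws Exception {
        // Same chain as above: 2..3-character grams, then PositionFilter so every
        // gram after the first reports a position increment of 0.
        TokenStream stream = new PositionFilter(
                new NGramTokenizer(new StringReader("lucene"), 2, 3));
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString() + " (posIncr=" + posIncr.getPositionIncrement() + ")");
        }
        stream.end();
        stream.close();
    }
}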
