Examples of SentenceTokenizer


Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

    this.stopWords = stopWords;
    wordSegment = new WordSegmenter();
  }

  public TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new SentenceTokenizer(reader);
    result = new WordTokenizer(result, wordSegment);
    // result = new LowerCaseFilter(result);
    // LowerCaseFilter is no longer needed, because SegTokenFilter has already converted all English characters to lowercase
    // The stemming is too strict here; this is not a bug, this is a feature :)
    result = new PorterStemFilter(result);
View Full Code Here
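
All of the analyzer fragments on this page wire up the same pipeline: SentenceTokenizer splits the incoming Reader into sentences, a word-segmentation stage (WordTokenizer/WordTokenFilter) splits each sentence into words, and PorterStemFilter stems any Latin-script tokens. A minimal sketch of driving such a chain by hand and printing the terms, assuming the Lucene 3.1+-era constructors and CharTermAttribute API (stemming and stop filtering omitted; class name and sample text are illustrative only):

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.cn.smart.SentenceTokenizer;
import org.apache.lucene.analysis.cn.smart.WordTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class SmartcnChainDemo {
  public static void main(String[] args) throws Exception {
    // Build the same chain the analyzers above assemble internally.
    Tokenizer sentences = new SentenceTokenizer(new StringReader("我购买了道具和服装。"));
    TokenStream words = new WordTokenFilter(sentences);
    CharTermAttribute term = words.addAttribute(CharTermAttribute.class);
    words.reset();                 // newer Lucene versions require reset() before incrementToken()
    while (words.incrementToken()) {
      System.out.println(term.toString());
    }
    words.end();
    words.close();
  }
}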

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

  public SmartChineseAnalyzer(Set stopWords) {
    this.stopWords = stopWords;
  }

  public TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new SentenceTokenizer(reader);
    result = new WordTokenFilter(result);
    // result = new LowerCaseFilter(result);
    // LowerCaseFilter is not needed, as SegTokenFilter lowercases Basic Latin text.
    // The porter stemming is too strict, this is not a bug, this is a feature:)
    result = new PorterStemFilter(result);
View Full Code Here

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      setPreviousTokenStream(streams);
      streams.tokenStream = new SentenceTokenizer(reader);
      streams.filteredTokenStream = new WordTokenFilter(streams.tokenStream);
      streams.filteredTokenStream = new PorterStemFilter(streams.filteredTokenStream);
      if (stopWords != null) {
        streams.filteredTokenStream = new StopFilter(streams.filteredTokenStream, stopWords, false);
      }
View Full Code Here
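
This variant follows the old reusableTokenStream contract: the first call on a given thread builds the SentenceTokenizer/WordTokenFilter/PorterStemFilter chain and caches it through setPreviousTokenStream, and subsequent calls (cut off above) typically just re-point the cached tokenizer at the new Reader rather than rebuilding the chain, which is why SavedStreams keeps both the tokenizer and the end of the filter chain.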

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

    this.matchVersion = matchVersion;
  }

  @Override
  public TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer tokenizer = new SentenceTokenizer(reader);
    TokenStream result = new WordTokenFilter(tokenizer);
    // result = new LowerCaseFilter(result);
    // LowerCaseFilter is not needed, as SegTokenFilter lowercases Basic Latin text.
    // The porter stemming is too strict, this is not a bug, this is a feature:)
    result = new PorterStemFilter(result);
View Full Code Here
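
On the newer Analyzer API the assembled chain is not returned directly from tokenStream; createComponents hands back the source tokenizer together with the end of the filter chain. A hedged sketch of how a method like the one above is typically completed, assuming the Lucene 4.x TokenStreamComponents API; stopWords (a CharArraySet) and matchVersion are assumed fields of the enclosing analyzer, and this is illustrative rather than the original class body:

  @Override
  public TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer tokenizer = new SentenceTokenizer(reader);
    TokenStream result = new WordTokenFilter(tokenizer);
    result = new PorterStemFilter(result);
    if (!stopWords.isEmpty()) {
      // Drop stop words last, after segmentation and stemming.
      result = new StopFilter(matchVersion, result, stopWords);
    }
    // Both the source tokenizer and the end of the chain are handed back for reuse.
    return new TokenStreamComponents(tokenizer, result);
  }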

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

    this.matchVersion = matchVersion;
  }

  @Override
  public TokenStream tokenStream(String fieldName, Reader reader) {
    TokenStream result = new SentenceTokenizer(reader);
    result = new WordTokenFilter(result);
    // result = new LowerCaseFilter(result);
    // LowerCaseFilter is not needed, as SegTokenFilter lowercases Basic Latin text.
    // The porter stemming is too strict, this is not a bug, this is a feature:)
    result = new PorterStemFilter(result);
View Full Code Here

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

      throws IOException {
    SavedStreams streams = (SavedStreams) getPreviousTokenStream();
    if (streams == null) {
      streams = new SavedStreams();
      setPreviousTokenStream(streams);
      streams.tokenStream = new SentenceTokenizer(reader);
      streams.filteredTokenStream = new WordTokenFilter(streams.tokenStream);
      streams.filteredTokenStream = new PorterStemFilter(streams.filteredTokenStream);
      if (!stopWords.isEmpty()) {
        streams.filteredTokenStream = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
                                                     streams.filteredTokenStream, stopWords, false);
View Full Code Here

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

* Factory for the SmartChineseAnalyzer {@link SentenceTokenizer}
* @lucene.experimental
*/
public class SmartChineseSentenceTokenizerFactory extends BaseTokenizerFactory {
  public Tokenizer create(Reader input) {
    return new SentenceTokenizer(input);
  }
View Full Code Here
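
The factory is what lets this tokenizer be named in a Solr analysis chain instead of being constructed in Java; programmatically it is only a thin wrapper around the constructor. A small sketch of exercising it directly, assuming the no-arg construction plus create(Reader) API of the snippet above (the TokenizerFactory variant further down is the same idea on the newer API); class names and sample text are illustrative:

import java.io.StringReader;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
// import for SmartChineseSentenceTokenizerFactory omitted: its package differs between the Solr and Lucene variants

public class SentenceFactoryDemo {
  public static void main(String[] args) throws Exception {
    SmartChineseSentenceTokenizerFactory factory = new SmartChineseSentenceTokenizerFactory();
    // Each token emitted by SentenceTokenizer is a whole sentence,
    // so the offsets printed below are sentence boundaries.
    Tokenizer sentences = factory.create(new StringReader("第一句。第二句!"));
    OffsetAttribute offsets = sentences.addAttribute(OffsetAttribute.class);
    sentences.reset();
    while (sentences.incrementToken()) {
      System.out.println(offsets.startOffset() + "-" + offsets.endOffset());
    }
    sentences.end();
    sentences.close();
  }
}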

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

   /*
    * (non-Javadoc)
    * @see org.apache.lucene.analysis.Analyzer#tokenStream(java.lang.String, java.io.Reader)
    */
   public TokenStream tokenStream(String fieldName, Reader reader) {
       return new NGramTokenFilter(new UpperCaseFilter(new SentenceTokenizer(reader)), minTokenLength, maxTokenLength);
   }
View Full Code Here

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

* @lucene.experimental
*/
public class SmartChineseSentenceTokenizerFactory extends TokenizerFactory {
  @Override
  public Tokenizer create(Reader input) {
    return new SentenceTokenizer(input);
  }
View Full Code Here

Examples of org.apache.lucene.analysis.cn.smart.SentenceTokenizer

                + "Please report this on the dev@apache.stanbol.org or create an "
                + "JIRA issue about this.");
        }
        if(!at.getSentences().hasNext()) { //no sentences  ... use this engine to detect
            //first the sentences
            TokenStream sentences = new SentenceTokenizer(new CharSequenceReader(at.getText()));
            try {
                while(sentences.incrementToken()){
                    OffsetAttribute offset = sentences.addAttribute(OffsetAttribute.class);
                    Sentence s = at.addSentence(offset.startOffset(), offset.endOffset());
                    if(log.isTraceEnabled()) {
                        log.trace("detected {}:{}",s,s.getSpan());
                    }
                }
View Full Code Here
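
Unlike the analyzer examples above, this Stanbol engine uses SentenceTokenizer on its own, with no word-level filters: when the AnalysedText does not yet contain sentences, each token the tokenizer emits is an entire sentence, so its OffsetAttribute start and end offsets can be recorded directly as sentence boundaries.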