Package: org.apache.lucene.analysis.tokenattributes

Examples of org.apache.lucene.analysis.tokenattributes.TermAttribute.termLength()


    TermAttribute termAtt = (TermAttribute) result
        .addAttribute(TermAttribute.class);
    StringBuilder buf = new StringBuilder();
    try {
      while (result.incrementToken()) {
        String word = new String(termAtt.termBuffer(), 0, termAtt
            .termLength());
        buf.append(filter.encode(word)).append(" ");
       
      }
    } catch (IOException e) {
View Full Code Here


    TermAttribute termAtt = ts.addAttribute(TermAttribute.class);

    Vector v1 = new RandomAccessSparseVector(100);                  
    while (ts.incrementToken()) {
      char[] termBuffer = termAtt.termBuffer();
      int termLen = termAtt.termLength();
      String w = new String(termBuffer, 0, termLen);                
      encoder.addToVector(w, 1, v1);                                
    }
    System.out.printf("%s\n", new SequentialAccessSparseVector(v1));
  }
View Full Code Here

   
    TermAttribute termAtt = (TermAttribute) result.addAttribute(TermAttribute.class);
    StringBuilder buf = new StringBuilder();
    try {
      while (result.incrementToken()) {
        if (termAtt.termLength() < 3) continue;
        String word = new String(termAtt.termBuffer(), 0, termAtt.termLength());
        Matcher m = alphabets.matcher(word);
       
        if (m.matches()) {
          buf.append(word).append(" ");
View Full Code Here

    TermAttribute termAtt = (TermAttribute) result.addAttribute(TermAttribute.class);
    StringBuilder buf = new StringBuilder();
    try {
      while (result.incrementToken()) {
        if (termAtt.termLength() < 3) continue;
        String word = new String(termAtt.termBuffer(), 0, termAtt.termLength());
        Matcher m = alphabets.matcher(word);
       
        if (m.matches()) {
          buf.append(word).append(" ");
        }
View Full Code Here

    TokenStream stream = analyzer.reusableTokenStream( fieldName, reader);
    TermAttribute attribute = (TermAttribute) stream.addAttribute( TermAttribute.class );
    stream.reset();

    while ( stream.incrementToken() ) {
      if ( attribute.termLength() > 0 ) {
        String term = attribute.term();
        terms.add( term );
      }
    }
    stream.end();
View Full Code Here

      writer.write('\t'); // edit: In order to match Hadoop standard
      // TextInputFormat
      TermAttribute termAtt = (TermAttribute) ts.addAttribute(TermAttribute.class);
      while (ts.incrementToken()) {
        char[] termBuffer = termAtt.termBuffer();
        int termLen = termAtt.termLength();
        writer.write(termBuffer, 0, termLen);
        writer.write(' ');
      }
    } finally {
      IOUtils.quietClose(reader);
View Full Code Here

   
    List<String> coll = new ArrayList<String>();
    TermAttribute termAtt = (TermAttribute) ts.addAttribute(TermAttribute.class);
    while (ts.incrementToken()) {
      char[] termBuffer = termAtt.termBuffer();
      int termLen = termAtt.termLength();
      String val = new String(termBuffer, 0, termLen);
      coll.add(val);
    }
    return coll.toArray(new String[coll.size()]);
  }
View Full Code Here

        WikipediaDatasetCreatorMapper.OPEN_TEXT_TAG_PATTERN.matcher(document).replaceFirst(""))
          .replaceAll(""));
      TokenStream stream = analyzer.tokenStream(catMatch, new StringReader(document));
      TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
      while (stream.incrementToken()) {
        contents.append(termAtt.termBuffer(), 0, termAtt.termLength()).append(' ');
      }
      output.collect(new Text(WikipediaDatasetCreatorMapper.SPACE_NON_ALPHA_PATTERN.matcher(catMatch)
          .replaceAll("_")), new Text(contents.toString()));
    }
  }
View Full Code Here

                  OutputCollector<Text,StringTuple> output, Reporter reporter) throws IOException {
    TokenStream stream = analyzer.tokenStream(key.toString(), new StringReader(value.toString()));
    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
    StringTuple document = new StringTuple();
    while (stream.incrementToken()) {
      if (termAtt.termLength() > 0) {
        document.add(new String(termAtt.termBuffer(), 0, termAtt.termLength()));
      }
    }
    output.collect(key, document);
  }
View Full Code Here

    TokenStream stream = analyzer.tokenStream(key.toString(), new StringReader(value.toString()));
    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
    StringTuple document = new StringTuple();
    while (stream.incrementToken()) {
      if (termAtt.termLength() > 0) {
        document.add(new String(termAtt.termBuffer(), 0, termAtt.termLength()));
      }
    }
    output.collect(key, document);
  }
 
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.