Package: org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.Token.termBuffer()


      document = StringEscapeUtils.unescapeHtml(document.replaceFirst("<text xml:space=\"preserve\">", "").replaceAll("</text>", ""));
      TokenStream stream = analyzer.tokenStream(country, new StringReader(document));
      while(true){
        Token token = stream.next();
        if(token==null) break;
        contents.append(token.termBuffer(), 0, token.termLength()).append(' ');
      }
      output.collect(new Text(country.replace(" ","_")), new Text(contents.toString()));
    }
  }
 
View Full Code Here


      writer.write(label);
      writer.write('\t'); // edit: Inorder to match Hadoop standard
      // TextInputFormat
      Token token = new Token();
      while ((token = ts.next(token)) != null) {
        char[] termBuffer = token.termBuffer();
        int termLen = token.termLength();
        writer.write(termBuffer, 0, termLen);
        writer.write(' ');
      }
    } finally {
View Full Code Here

    TokenStream ts = analyzer.tokenStream("", reader);

    Token token;
    List<String> coll = new ArrayList<String>();
    while ((token = ts.next()) != null) {
      char[] termBuffer = token.termBuffer();
      int termLen = token.termLength();
      String val = new String(termBuffer, 0, termLen);
      coll.add(val);
    }
    return coll.toArray(new String[coll.size()]);
View Full Code Here

        if (!field.isTokenized()) {      // un-tokenized field
          String stringValue = field.stringValue();
          final int valueLength = stringValue.length();
          Token token = localToken;
          token.clear();
          char[] termBuffer = token.termBuffer();
          if (termBuffer.length < valueLength)
            termBuffer = token.resizeTermBuffer(valueLength);
          stringValue.getChars(0, valueLength, termBuffer, 0);
          token.setTermLength(valueLength);
          token.setStartOffset(offset);
View Full Code Here

                                Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
                                Token t = new Token();
                                for (Fieldable field : fields) {
                                    // assume properties fields use SingleTokenStream
                                    t = field.tokenStreamValue().next(t);
                                    String value = new String(t.termBuffer(), 0, t.termLength());
                                    if (value.startsWith(namePrefix)) {
                                        // extract value
                                        value = value.substring(namePrefix.length());
                                        // create new named value
                                        Path p = getRelativePath(state, propState);
View Full Code Here

                                Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
                                Token t = new Token();
                                for (Fieldable field : fields) {
                                    // assume properties fields use SingleTokenStream
                                    t = field.tokenStreamValue().next(t);
                                    String value = new String(t.termBuffer(), 0, t.termLength());
                                    if (value.startsWith(namePrefix)) {
                                        // extract value
                                        value = value.substring(namePrefix.length());
                                        // create new named value
                                        Path p = getRelativePath(state, propState);
View Full Code Here

    Token t = input.next(result);

    if (t == null)
      return null;

    char[] buffer = t.termBuffer();
    final int bufferLength = t.termLength();
    final String type = t.type();

    if (type == APOSTROPHE_TYPE &&      // remove 's
  bufferLength >= 2 &&
View Full Code Here

                        {
                           Fieldable field = fields[k];
                           // assume properties fields use
                           // SingleTokenStream
                           t = field.tokenStreamValue().next(t);
                           String value = new String(t.termBuffer(), 0, t.termLength());
                           if (value.startsWith(namePrefix))
                           {
                              // extract value
                              value = value.substring(namePrefix.length());
                              // create new named value
View Full Code Here

      writer.write(label);
      writer.write('\t'); // edit: Inorder to match Hadoop standard
      // TextInputFormat
      Token token = new Token();
      while ((token = ts.next(token)) != null) {
        char[] termBuffer = token.termBuffer();
        int termLen = token.termLength();
        writer.write(termBuffer, 0, termLen);
        writer.write(' ');
      }
    } finally {
View Full Code Here

    TokenStream ts = analyzer.tokenStream("", reader);

    Token token;
    List<String> coll = new ArrayList<String>();
    while ((token = ts.next()) != null) {
      char[] termBuffer = token.termBuffer();
      int termLen = token.termLength();
      String val = new String(termBuffer, 0, termLen);
      coll.add(val);
    }
    return coll.toArray(new String[coll.size()]);
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware#gmail.com.