Package org.apache.lucene.document

Examples of org.apache.lucene.document.Fieldable.tokenStreamValue()


                        for (int k = 0; k < fields.length; k++)
                        {
                           Fieldable field = fields[k];
                           // assume properties fields use
                           // SingleTokenStream
                           t = field.tokenStreamValue().next(t);
                           String value = new String(t.termBuffer(), 0, t.termLength());
                           if (value.startsWith(namePrefix))
                           {
                              // extract value
                              value = value.substring(namePrefix.length());
View Full Code Here
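
The excerpt above reads exactly one token from each properties field: it assumes the field was added with a pre-built single-token stream, so tokenStreamValue() is non-null and a single next(Token) call yields the whole "name + value" term. Below is a minimal, self-contained sketch of that round trip for the pre-3.0 Lucene API the excerpt targets; the "properties" field name, the "title:" prefix, and the use of KeywordTokenizer in place of the project's own SingleTokenStream are illustrative assumptions.

import java.io.StringReader;

import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;

public class SingleTokenFieldExample {

   public static void main(String[] args) throws Exception {
      String namePrefix = "title:";   // hypothetical name/value convention

      // KeywordTokenizer emits its entire input as one token, standing in
      // for the SingleTokenStream the excerpt assumes.
      TokenStream single = new KeywordTokenizer(new StringReader(namePrefix + "My Document"));

      Document doc = new Document();
      doc.add(new Field("properties", single));   // field built directly from a TokenStream

      Fieldable[] fields = doc.getFieldables("properties");
      Token t = new Token();
      for (int k = 0; k < fields.length; k++) {
         Fieldable field = fields[k];
         // tokenStreamValue() is non-null because the field carries the stream itself
         t = field.tokenStreamValue().next(t);
         String value = new String(t.termBuffer(), 0, t.termLength());
         if (value.startsWith(namePrefix)) {
            System.out.println(value.substring(namePrefix.length()));   // prints "My Document"
         }
      }
   }
}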


        tokensByField.put(field, tokens);

        if (field.isTokenized()) {
          final TokenStream tokenStream;
          // todo readerValue(), binaryValue()
          if (field.tokenStreamValue() != null) {
            tokenStream = field.tokenStreamValue();
          } else {
            tokenStream = analyzer.reusableTokenStream(field.name(), new StringReader(field.stringValue()));
          }
View Full Code Here

        if (field.isTokenized()) {
          final TokenStream tokenStream;
          // todo readerValue(), binaryValue()
          if (field.tokenStreamValue() != null) {
            tokenStream = field.tokenStreamValue();
          } else {
            tokenStream = analyzer.reusableTokenStream(field.name(), new StringReader(field.stringValue()));
          }

          // reset the TokenStream to the first token         
View Full Code Here
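
Both excerpts above follow the same selection logic: if the caller attached a ready-made TokenStream to the field, tokenStreamValue() returns it and no analysis is needed; otherwise the analyzer tokenizes the field's string value. A hedged sketch of that logic as a standalone helper (the class and method names are invented for illustration, and readerValue()/binaryValue() are left unhandled, as in the excerpts):

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Fieldable;

final class FieldTokenStreams {

  private FieldTokenStreams() {}

  // Prefer the pre-analyzed stream attached to the field; otherwise ask the
  // indexing analyzer to tokenize the field's string value.
  static TokenStream tokenStream(Fieldable field, Analyzer analyzer) throws IOException {
    TokenStream streamValue = field.tokenStreamValue();
    if (streamValue != null) {
      return streamValue;
    }
    return analyzer.reusableTokenStream(field.name(), new StringReader(field.stringValue()));
  }
}

The caller then resets and walks the returned stream exactly as the indexing code above goes on to do.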

          fieldState.length++;
          fieldState.position++;
          anyToken = valueLength > 0;
        } else {                                  // tokenized field
          final TokenStream stream;
          final TokenStream streamValue = field.tokenStreamValue();

          if (streamValue != null)
            stream = streamValue;
          else {
            // the field does not have a TokenStream,
View Full Code Here

                            Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
                            Token t = new Token();
                            for (int k = 0; k < fields.length; k++) {
                                Fieldable field = fields[k];
                                // assume properties fields use SingleTokenStream
                                t = field.tokenStreamValue().next(t);
                                String value = new String(t.termBuffer(), 0, t.termLength());
                                if (value.startsWith(namePrefix)) {
                                    // extract value
                                    value = value.substring(namePrefix.length());
                                    // create new named value
View Full Code Here

          fieldState.offset += valueLength;
          fieldState.length++;
          fieldState.position++;
        } else {                                  // tokenized field
          final TokenStream stream;
          final TokenStream streamValue = field.tokenStreamValue();

          if (streamValue != null)
            stream = streamValue;
          else {
            // the field does not have a TokenStream,
View Full Code Here

            addPosition(fieldName, stringValue, position++, null, null);
          offset += stringValue.length();
          length++;
        } else
        { // tokenized field
          TokenStream stream = field.tokenStreamValue();
         
          // the field does not have a TokenStream,
          // so we have to obtain one from the analyzer
          if (stream == null) {
            Reader reader;        // find or make Reader
View Full Code Here
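
The older indexer above takes the same two branches: use the field's own stream if present, otherwise build a Reader for the analyzer. For completeness, here is a short sketch of consuming such an analyzer-produced stream with the attribute-based API (Lucene 3.0-era names are assumed; the "body" field name and sample text are placeholders):

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class ConsumeFieldStream {

  public static void main(String[] args) throws Exception {
    // Stand-in for the analyzer branch above: the field supplies no
    // tokenStreamValue(), so the text is tokenized from a Reader.
    TokenStream stream = new StandardAnalyzer(Version.LUCENE_30)
        .reusableTokenStream("body", new StringReader("a plain analyzed field value"));

    TermAttribute term = stream.addAttribute(TermAttribute.class);
    stream.reset();                        // position the stream at the first token
    while (stream.incrementToken()) {
      System.out.println(term.term());     // print each term in order
    }
    stream.end();
    stream.close();
  }
}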
