Package org.apache.lucene.analysis.tokenattributes

Examples of org.apache.lucene.analysis.tokenattributes.TermAttribute


 
  @Override
  public void map(Text key, Text value,
                  OutputCollector<Text,StringTuple> output, Reporter reporter) throws IOException {
    TokenStream stream = analyzer.tokenStream(key.toString(), new StringReader(value.toString()));
    TermAttribute termAtt = (TermAttribute) stream.addAttribute(TermAttribute.class);
    StringTuple document = new StringTuple();
    while (stream.incrementToken()) {
      if (termAtt.termLength() > 0) {
        document.add(new String(termAtt.termBuffer(), 0, termAtt.termLength()));
      }
    }
    output.collect(key, document);
  }
View Full Code Here


  }
 
  public void testNext() throws IOException {
   
   
    TermAttribute termAtt = (TermAttribute)merger.getAttribute(TermAttribute.class);
    OffsetAttribute offsetAtt = (OffsetAttribute)merger.getAttribute(OffsetAttribute.class);
    PositionIncrementAttribute posIncAtt = (PositionIncrementAttribute)merger.getAttribute(PositionIncrementAttribute.class);
   
    merger.incrementToken();
   
    assertEquals("1111", termAtt.term());
    assertEquals(0, offsetAtt.startOffset());
    assertEquals(4, offsetAtt.endOffset());
    assertEquals(1, posIncAtt.getPositionIncrement());
   
    merger.incrementToken();   
    assertEquals("1111",  termAtt.term());
    assertEquals(5,  offsetAtt.startOffset());
    assertEquals(9,  offsetAtt.endOffset());
    assertEquals(1,  posIncAtt.getPositionIncrement());

    merger.incrementToken();
    assertEquals("2222",  termAtt.term());
    assertEquals(5,  offsetAtt.startOffset());
    assertEquals(9,  offsetAtt.endOffset());
    assertEquals(0,  posIncAtt.getPositionIncrement());

    merger.incrementToken();
    assertEquals("1111",  termAtt.term());
    assertEquals(10,  offsetAtt.startOffset());
    assertEquals(14,  offsetAtt.endOffset());
    assertEquals(1,  posIncAtt.getPositionIncrement());

    merger.incrementToken();
    assertEquals("3333",  termAtt.term());
    assertEquals(10,  offsetAtt.startOffset());
    assertEquals(14,  offsetAtt.endOffset());
    assertEquals(0,  posIncAtt.getPositionIncrement());

    merger.incrementToken();
    assertEquals("1111",  termAtt.term());
    assertEquals(15, offsetAtt.startOffset());
    assertEquals(19, offsetAtt.endOffset());
    assertEquals(1,  posIncAtt.getPositionIncrement());

    merger.incrementToken();
    assertEquals("2222",  termAtt.term());
    assertEquals(15,  offsetAtt.startOffset());
    assertEquals(19, offsetAtt.endOffset());
    assertEquals(0, posIncAtt.getPositionIncrement());
  }
View Full Code Here

                           Fieldable field = fields[k];
                           // assume properties fields use
                           // SingleTokenStream
                           //t = field.tokenStreamValue().next(t);
                           // advance the stream to its (single) token before reading attributes
                           field.tokenStreamValue().incrementToken();
                           TermAttribute term = field.tokenStreamValue().getAttribute(TermAttribute.class);
                           PayloadAttribute payload = field.tokenStreamValue().getAttribute(PayloadAttribute.class);

                           // copy the term characters into a String for the prefix check below
                           String value = new String(term.termBuffer(), 0, term.termLength());

                           if (value.startsWith(namePrefix))
                           {
                              // extract value
                              value = value.substring(namePrefix.length());
                              // create new named value
                              QPath p = getRelativePath(state, propState);
                              String path = getNamespaceMappings().translatePath(p);
                              value = FieldNames.createNamedValue(path, value);

                              // re-index the rewritten value under the original field name,
                              // carrying over the original token's payload
                              term.setTermBuffer(value);
                              doc.add(new Field(field.name(), new SingletonTokenStream(term.term(), payload
                                 .getPayload())));
                              // record which aggregated node this value originated from
                              doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, parent.getIdentifier(),
                                 Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
                           }
                        }
View Full Code Here

    } else {
      // run the analyzer over the text and collect each produced term
      TokenStream tokens = analyzer.tokenStream(
                              field, new StringReader(terms.toString()));

      TermAttribute ta = tokens.getAttribute(TermAttribute.class);
      try
      {
        String termText;
        while (tokens.incrementToken())
        {
          // a null term ends collection early; otherwise accumulate it
          if ((termText = ta.term()) == null)
            break;
          result.add(termText);
        }
      } catch (IOException e) {
        // ignore (?)
View Full Code Here

            throws IOException {
        // tokenize the reader's content with the configured analyzer
        TokenStream ts = analyzer.tokenStream(fieldName, r);
        int tokenCount = 0;
        // for every token
        while (ts.incrementToken()) {
            TermAttribute term = ts.getAttribute(TermAttribute.class);
            String word =  term.term();
            tokenCount++;
            // stop once the configured parse limit has been exceeded
            if (tokenCount > maxNumTokensParsed) {
                break;
            }
            if (isNoiseWord(word)) {
View Full Code Here

                                Fieldable[] fields = aDoc.getFieldables(FieldNames.PROPERTIES);
                                for (Fieldable field : fields) {

                                    // assume properties fields use SingleTokenStream
                                    TokenStream tokenStream = field.tokenStreamValue();
                                    TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class);
                                    PayloadAttribute payloadAttribute = tokenStream.addAttribute(PayloadAttribute.class);
                                    // consume the single token, then finish and release the stream
                                    tokenStream.incrementToken();
                                    tokenStream.end();
                                    tokenStream.close();

                                    String value = new String(termAttribute.termBuffer(), 0, termAttribute.termLength());
                                    if (value.startsWith(namePrefix)) {
                                        // extract value
                                        value = value.substring(namePrefix.length());
                                        // create new named value
                                        Path p = getRelativePath(state, propState);
                                        String path = getNamespaceMappings().translatePath(p);
                                        value = FieldNames.createNamedValue(path, value);
                                        termAttribute.setTermBuffer(value);
                                        // re-index the rewritten value, cloning the payload so the
                                        // original token's Payload instance is not shared
                                        doc.add(new Field(field.name(),
                                                new SingletonTokenStream(value, (Payload) payloadAttribute.getPayload().clone())));
                                        doc.add(new Field(
                                                FieldNames.AGGREGATED_NODE_UUID,
                                                false,
View Full Code Here

          "Alternatively, apply the ignoreFieldBridge() option to " +
          "pass String parameters" );
    }
    Reader reader = new StringReader(localText);
    // reuse the analyzer's cached token stream for this field
    TokenStream stream = analyzer.reusableTokenStream( fieldName, reader);
    TermAttribute attribute = stream.addAttribute( TermAttribute.class );
    stream.reset();

    while ( stream.incrementToken() ) {
      // collect every non-empty term the analyzer produces
      if ( attribute.termLength() > 0 ) {
        String term = attribute.term();
        terms.add( term );
      }
    }
    stream.end();
    stream.close();
View Full Code Here

          "Alternatively, apply the ignoreFieldBridge() option to " +
          "pass String parameters" );
    }
    Reader reader = new StringReader(localText);
    // reuse the analyzer's cached token stream for this field
    TokenStream stream = analyzer.reusableTokenStream( fieldName, reader);
    TermAttribute attribute = stream.addAttribute( TermAttribute.class );
    stream.reset();

    while ( stream.incrementToken() ) {
      // collect every non-empty term the analyzer produces
      if ( attribute.termLength() > 0 ) {
        String term = attribute.term();
        terms.add( term );
      }
    }
    stream.end();
    stream.close();
View Full Code Here

    final String sentence = phraseContext.getSentence();
    try {
      Reader reader = new StringReader( sentence );
      stream = queryContext.getQueryAnalyzer().reusableTokenStream( fieldName, reader);

      TermAttribute termAttribute = stream.addAttribute( TermAttribute.class );
      PositionIncrementAttribute positionAttribute = stream.addAttribute( PositionIncrementAttribute.class );

      stream.reset();
      int position = -1; //start at -1 since we apply at least one increment
      List<Term> termsAtSamePosition = null;
      while ( stream.incrementToken() ) {
        int positionIncrement = 1;
        if ( positionAttribute != null ) {
          positionIncrement = positionAttribute.getPositionIncrement();
        }

        // a positive increment starts a new position; an increment of 0
        // keeps the term at the same position as the previous token
        if ( positionIncrement > 0 ) {
          position+=positionIncrement;
          termsAtSamePosition = termsPerPosition.get(position);
        }

        if (termsAtSamePosition == null) {
          termsAtSamePosition = new ArrayList<Term>();
          termsPerPosition.put( position, termsAtSamePosition  );
        }

        termsAtSamePosition.add( new Term( fieldName, termAttribute.term() ) );
        // two or more terms at one position means a multi-phrase query is required
        if ( termsAtSamePosition.size() > 1 ) {
          isMultiPhrase = true;
        }
      }
    }
View Full Code Here

    public boolean incrementToken() {
      if (upto == 4) {
        return false;
      }
      PositionIncrementAttribute posIncr = (PositionIncrementAttribute) addAttribute(PositionIncrementAttribute.class);
      TermAttribute term = (TermAttribute) addAttribute(TermAttribute.class);
      if (upto == 0) {
        posIncr.setPositionIncrement(1);
        term.setTermBuffer("a");
      } else if (upto == 1) {
        posIncr.setPositionIncrement(1);
        term.setTermBuffer("b");
      } else if (upto == 2) {
        posIncr.setPositionIncrement(0);
        term.setTermBuffer("c");
      } else {
        posIncr.setPositionIncrement(0);
        term.setTermBuffer("d");
      }
      upto++;
      return true;
    }
View Full Code Here

TOP

Related Classes of org.apache.lucene.analysis.tokenattributes.TermAttribute

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.