Package org.apache.lucene.analysis

Examples of org.apache.lucene.analysis.Analyzer
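The excerpts below are drawn from the Lucene code base and its test suite. They show typical Analyzer usage: choosing an analyzer for query parsing, escaping query strings, tokenizing field text by hand, and wrapping a child analyzer in anonymous Analyzer subclasses.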


    qp.setDefaultOperator(QueryParserWrapper.OR_OPERATOR);
    assertEquals(QueryParserWrapper.OR_OPERATOR, qp.getDefaultOperator());
  }

  public void testPunct() throws Exception {
    Analyzer a = new WhitespaceAnalyzer();
    assertQueryEquals("a&b", a, "a&b");
    assertQueryEquals("a&&b", a, "a&&b");
    assertQueryEquals(".NET", a, ".NET");
  }
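WhitespaceAnalyzer tokenizes on whitespace alone, which is why the punctuation in these assertions survives analysis. A minimal standalone sketch of that behavior, written against the same 2.9/3.0-era TokenStream API the excerpts use (the class and field names are illustrative):

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

public class PunctDemo {
  public static void main(String[] args) throws Exception {
    Analyzer a = new WhitespaceAnalyzer();
    TokenStream stream = a.tokenStream("field", new StringReader("a&b a&&b .NET"));
    TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
    while (stream.incrementToken()) {
      // punctuation is preserved: prints a&b, a&&b, .NET
      System.out.println(termAtt.term());
    }
    stream.close();
  }
}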


    // The numbers go away because SimpleAnalyzer ignores them
    assertQueryEquals("3", null, "");
    assertQueryEquals("term 1.0 1 2", null, "term");
    assertQueryEquals("term term1 term2", null, "term term term");

    Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
    assertQueryEquals("3", a, "3");
    assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
    assertQueryEquals("term term1 term2", a, "term term1 term2");
  }
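The contrast is easy to reproduce directly: SimpleAnalyzer tokenizes on letters, so digits are discarded, while StandardAnalyzer recognizes numbers as tokens. A hedged standalone sketch (class, method, and field names are illustrative):

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class NumberTokenDemo {
  static void dump(Analyzer a, String text) throws Exception {
    TokenStream stream = a.tokenStream("field", new StringReader(text));
    TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
    while (stream.incrementToken()) {
      System.out.print(termAtt.term() + " ");
    }
    stream.close();
    System.out.println();
  }

  public static void main(String[] args) throws Exception {
    dump(new SimpleAnalyzer(), "term 1.0 1 2");                          // prints: term
    dump(new StandardAnalyzer(Version.LUCENE_CURRENT), "term 1.0 1 2");  // prints: term 1.0 1 2
  }
}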

        + "}", "{" + getDate(startDate, resolution) + " TO "
        + getDate(endDate, resolution) + "}");
  }

  public void testEscaped() throws Exception {
    Analyzer a = new WhitespaceAnalyzer();

    /*
     * assertQueryEquals("\\[brackets", a, "\\[brackets");
     * assertQueryEquals("\\[brackets", null, "brackets");
     * assertQueryEquals("\\\\", a, "\\\\"); assertQueryEquals("\\+blah", a,
View Full Code Here

    // LUCENE-1189
    assertQueryEquals("(\"a\\\\\") or (\"b\")", a, "a\\ or b");
  }

  public void testQueryStringEscaping() throws Exception {
    Analyzer a = new WhitespaceAnalyzer();

    assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
    assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
    assertEscapedQueryEquals("a:b:c", a, "a\\:b\\:c");
    assertEscapedQueryEquals("a\\b:c", a, "a\\\\b\\:c");
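The expected strings in these assertions match what the static QueryParser.escape helper produces: it backslash-escapes every character that is significant to the query syntax. A small sketch:

import org.apache.lucene.queryParser.QueryParser;

public class EscapeDemo {
  public static void main(String[] args) {
    System.out.println(QueryParser.escape("a-b:c"));   // a\-b\:c
    System.out.println(QueryParser.escape("a+b:c"));   // a\+b\:c
    System.out.println(QueryParser.escape("a\\b:c"));  // a\\b\:c
  }
}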


  @Override
  protected Query[] prepareQueries() throws Exception {

    Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer",
            "org.apache.lucene.analysis.standard.StandardAnalyzer"));
    String defaultField = config.get("file.query.maker.default.field", DocMaker.BODY_FIELD);
    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, defaultField, anlzr);

    List<Query> qq = new ArrayList<Query>();
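This prepareQueries implementation (evidently a file-based query maker, given the file.query.maker.default.field key) presumably goes on to read query strings and parse each one into qq. A hedged sketch of that parse step with the same parser setup (the field name and query text are illustrative, not from the original):

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class ParseDemo {
  public static void main(String[] args) throws Exception {
    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "body",
        new StandardAnalyzer(Version.LUCENE_CURRENT));
    List<Query> qq = new ArrayList<Query>();
    qq.add(qp.parse("term1 AND term2"));  // hypothetical query line
    System.out.println(qq);               // [+body:term1 +body:term2]
  }
}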


  private Query explainQuery(String queryString) throws IOException, ParseException {

    searcher = new IndexSearcher(indexName, true);
    Analyzer analyzer = createAnalyzer();
    getFieldInfo();

    int arraySize = indexedFields.size();
    String indexedArray[] = new String[arraySize];
    for (int ii = 0; ii < arraySize; ii++) {
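      // (array population elided in the listing)
    }
    // Hedged sketch of a plausible finish: parse, search, explain. The default
    // field and hit count are assumptions, not the original source; it needs the
    // usual org.apache.lucene.search and queryParser imports.
    Query query = new QueryParser(Version.LUCENE_CURRENT, indexedArray[0], analyzer).parse(queryString);
    TopDocs hits = searcher.search(query, 10);
    for (ScoreDoc sd : hits.scoreDocs)
      System.out.println(searcher.explain(query, sd.doc));  // scoring breakdown per hit
    return query;
  }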

   * TODO: Allow user to specify analyzer
   */
  private void initSearch(String queryString) throws IOException, ParseException {

    searcher = new IndexSearcher(indexName, true);
    Analyzer analyzer = createAnalyzer();
    getFieldInfo();

    int arraySize = fields.size();
    fieldsArray = new String[arraySize];
    for (int ii = 0; ii < arraySize; ii++) {

    throws IOException {

    Map<String,Integer> tokenMap = new HashMap<String,Integer>();
    final int maxFieldLength = 10000;

    Analyzer analyzer = createAnalyzer();
    for (Fieldable field : doc.getFields()) {
      String fieldName = field.name();
      if (field.isIndexed()) {
        if (field.isTokenized()) {     // tokenized field: run it through the analyzer
          Reader reader;        // find or make Reader
          if (field.readerValue() != null)
            reader = field.readerValue();
          else if (field.stringValue() != null)
            reader = new StringReader(field.stringValue());
          else
            throw new IllegalArgumentException
              ("field must have either String or Reader value");

          int position = 0;
          // Tokenize field and add to postingTable
          TokenStream stream = analyzer.tokenStream(fieldName, reader);
          TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
          PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
         
          try {
            while (stream.incrementToken()) {
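              // The listing is cut off here; what follows is a hedged sketch of the
              // frequency counting the loop plausibly performs, not the original source.
              position += posIncrAtt.getPositionIncrement();  // advance by the token's increment
              if (position > maxFieldLength) break;           // honor the field-length cap
              String term = termAtt.term();
              Integer freq = tokenMap.get(term);              // accumulate term frequency
              tokenMap.put(term, freq == null ? 1 : freq + 1);
            }
          } finally {
            stream.close();
          }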

    if (child == null)
      throw new IllegalArgumentException("child analyzer must not be null");
    if (log == null)
      throw new IllegalArgumentException("logStream must not be null");

    return new Analyzer() {
      @Override
      public TokenStream tokenStream(final String fieldName, Reader reader) {
        return new TokenFilter(child.tokenStream(fieldName, reader)) {
          private int position = -1;
          private TermAttribute termAtt = addAttribute(TermAttribute.class);
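          // Cut off in the listing; a hedged sketch of a logging incrementToken.
          // The log format is an assumption, and log is assumed to be a
          // PrintStream, matching the "logStream" null-check above.
          private PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);

          @Override
          public boolean incrementToken() throws IOException {
            if (!input.incrementToken()) return false;
            position += posAtt.getPositionIncrement();
            log.println(fieldName + ":" + position + ":" + termAtt.term());
            return true;
          }
        };
      }
    };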

    if (maxTokens < 0)
      throw new IllegalArgumentException("maxTokens must not be negative");
    if (maxTokens == Integer.MAX_VALUE)
      return child; // no need to wrap
 
    return new Analyzer() {
      @Override
      public TokenStream tokenStream(String fieldName, Reader reader) {
        return new TokenFilter(child.tokenStream(fieldName, reader)) {
          private int todo = maxTokens;
         
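          // Cut off in the listing; a hedged sketch of the truncating
          // incrementToken, one plausible implementation rather than the original.
          @Override
          public boolean incrementToken() throws IOException {
            // deliver at most maxTokens tokens, then report end-of-stream
            return --todo >= 0 && input.incrementToken();
          }
        };
      }
    };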
