Class org.apache.lucene.analysis.Analyzer

Examples of org.apache.lucene.analysis.Analyzer.TokenStreamComponents

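TokenStreamComponents pairs the source Tokenizer of an analysis chain with the final TokenStream handed back to consumers: the two-argument constructor takes the tokenizer plus the outermost filter wrapped around it, while the single-argument form is shorthand for a chain with no filters. A minimal sketch of the pattern every snippet below follows, assuming the Lucene 4.x createComponents(String, Reader) signature and the test framework's TEST_VERSION_CURRENT constant:

    import java.io.Reader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.LowerCaseFilter;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;

    Analyzer analyzer = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // first argument: the source Tokenizer; second: the end of the filter chain
        Tokenizer source = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
        return new TokenStreamComponents(source, new LowerCaseFilter(TEST_VERSION_CURRENT, source));
      }
    };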

    // ...
    Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = null;
        try {
          tokenizer = new PatternTokenizer(reader, Pattern.compile("a"), -1);
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
        return new TokenStreamComponents(tokenizer, tokenizer);
      }
    };
    checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
   
    Analyzer b = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = null;
        try {
          tokenizer = new PatternTokenizer(reader, Pattern.compile("a"), 0);
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
        return new TokenStreamComponents(tokenizer, tokenizer);
      }   
    };
    checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER);
  }
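PatternTokenizer's last argument selects its mode: group -1 splits the input on matches of the pattern (analogous to String.split), while group >= 0 emits the given capture group of each match (group 0 being the whole match). For the Pattern.compile("a") used above:

    // group -1: split on matches   "banana" -> "b", "n", "n"
    // group  0: keep each match    "banana" -> "a", "a", "a"

That is why the test exercises both -1 and 0.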


  public void testRandomStrings() throws Exception {
    Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new ReversePathHierarchyTokenizer(newAttributeFactory(), reader, DEFAULT_DELIMITER, DEFAULT_DELIMITER, DEFAULT_SKIP);
        return new TokenStreamComponents(tokenizer, tokenizer);
      }   
    };
    // TODO: properly support positionLengthAttribute
    checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER, 20, false, false);
  }
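ReversePathHierarchyTokenizer emits the full input followed by successively shorter suffixes at each delimiter, the mirror image of PathHierarchyTokenizer's prefixes; the snippets here use DEFAULT_DELIMITER ('/') and DEFAULT_SKIP (0). A hedged sketch with an explicit '.' delimiter, the expected tokens taken from the class javadoc's domain example:

    Analyzer rev = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // delimiter '.', replacement '.', skip 0
        Tokenizer tokenizer = new ReversePathHierarchyTokenizer(reader, '.', '.', 0);
        return new TokenStreamComponents(tokenizer, tokenizer);
      }
    };
    // "www.site.co.uk" -> "www.site.co.uk", "site.co.uk", "co.uk", "uk"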

    // ...
    Random random = random();
    Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new ReversePathHierarchyTokenizer(newAttributeFactory(), reader, DEFAULT_DELIMITER, DEFAULT_DELIMITER, DEFAULT_SKIP);
        return new TokenStreamComponents(tokenizer, tokenizer);
      }   
    };
    // TODO: properly support positionLengthAttribute
    checkRandomData(random, a, 100*RANDOM_MULTIPLIER, 1027, false, false);
  }
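checkRandomData (from BaseTokenStreamTestCase) pushes randomly generated text through the analyzer and verifies token stream invariants on each pass. In this vintage of the test framework the long overload reads roughly checkRandomData(random, analyzer, iterations, maxWordLength, simple, offsetsAreCorrect); passing false for the final flag relaxes the stricter offset and position checks, which is what the TODO about positionLengthAttribute is pointing at.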

  public void testCrazyOffsetGap() throws Exception {
    Directory dir = newDirectory();
    Analyzer analyzer = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.KEYWORD, false));
      }

      @Override
      public int getOffsetGap(String fieldName) {
        return -10;
      }
    };
    // ...
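getOffsetGap is the analyzer's hook for the offset distance added between multiple values of the same field (the default implementation returns 1, mirroring getPositionIncrementGap for positions); returning -10 is deliberately pathological, which is presumably what the test goes on to exercise.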

  public void testRandomStrings() throws Exception {
    Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new PatternTokenizer(newAttributeFactory(), reader, Pattern.compile("a"), -1);
        return new TokenStreamComponents(tokenizer);
      }   
    };
    checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER);
   
    Analyzer b = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        Tokenizer tokenizer = new PatternTokenizer(newAttributeFactory(), reader, Pattern.compile("a"), 0);
        return new TokenStreamComponents(tokenizer);
      }   
    };
    checkRandomData(random(), b, 1000*RANDOM_MULTIPLIER);
  }
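This is the same PatternTokenizer test as the first snippet, taken from a later version of the code: the tokenizer now accepts an AttributeFactory, its constructor no longer declares IOException (hence no try/catch), and the one-argument TokenStreamComponents(tokenizer) replaces the (tokenizer, tokenizer) form.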

    // ...
    /* analyzer that uses whitespace + wdf */
    Analyzer a = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String field, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT,
            tokenizer,
            flags, protWords));
      }
    };

    /* in this case, works as expected. */
    assertAnalyzesTo(a, "LUCENE / SOLR", new String[] { "LUCENE", "SOLR" },
        new int[] { 0, 9 },
        new int[] { 6, 13 },
        new int[] { 1, 1 });
   
    /* only in this case, posInc of 2 ?! */
    assertAnalyzesTo(a, "LUCENE / solR", new String[] { "LUCENE", "sol", "solR", "R" },
        new int[] { 0, 9, 9, 12 },
        new int[] { 6, 12, 13, 13 },
        new int[] { 1, 1, 0, 1 });
   
    assertAnalyzesTo(a, "LUCENE / NUTCH SOLR", new String[] { "LUCENE", "NUTCH", "SOLR" },
        new int[] { 0, 9, 15 },
        new int[] { 6, 14, 19 },
        new int[] { 1, 1, 1 });
   
    /* analyzer that will consume tokens with large position increments */
    Analyzer a2 = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String field, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT,
            new LargePosIncTokenFilter(tokenizer),
            flags, protWords));
      }
    };
   
    /* increment of "largegap" is preserved */
    assertAnalyzesTo(a2, "LUCENE largegap SOLR", new String[] { "LUCENE", "largegap", "SOLR" },
        new int[] { 0, 7, 16 },
        new int[] { 6, 15, 20 },
        new int[] { 1, 10, 1 });
   
    /* the "/" had a position increment of 10, where did it go?!?!! */
    assertAnalyzesTo(a2, "LUCENE / SOLR", new String[] { "LUCENE", "SOLR" },
        new int[] { 0, 9 },
        new int[] { 6, 13 },
        new int[] { 1, 11 });
   
    /* in this case, the increment of 10 from the "/" is carried over */
    assertAnalyzesTo(a2, "LUCENE / solR", new String[] { "LUCENE", "sol", "solR", "R" },
        new int[] { 0, 9, 9, 12 },
        new int[] { 6, 12, 13, 13 },
        new int[] { 1, 11, 0, 1 });
   
    assertAnalyzesTo(a2, "LUCENE / NUTCH SOLR", new String[] { "LUCENE", "NUTCH", "SOLR" },
        new int[] { 0, 9, 15 },
        new int[] { 6, 14, 19 },
        new int[] { 1, 11, 1 });

    Analyzer a3 = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String field, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        StopFilter filter = new StopFilter(TEST_VERSION_CURRENT,
            tokenizer, StandardAnalyzer.STOP_WORDS_SET);
        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, filter, flags, protWords));
      }
    };

    assertAnalyzesTo(a3, "lucene.solr",
        new String[] { "lucene", "lucenesolr", "solr" },
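The flags and protWords variables are defined outside these excerpts: flags is a bitwise OR of WordDelimiterFilter's int constants, and protWords is a CharArraySet of terms protected from splitting. A hypothetical combination, purely for illustration (the real tests derive flags elsewhere):

    // hypothetical flag set, not the one the tests actually use
    int flags = WordDelimiterFilter.GENERATE_WORD_PARTS
              | WordDelimiterFilter.GENERATE_NUMBER_PARTS
              | WordDelimiterFilter.CATENATE_WORDS
              | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE;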

    // ...
    /* analyzer that uses whitespace + wdf */
    Analyzer a = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String field, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, null));
      }
    };
   
    assertAnalyzesTo(a, "abc-def-123-456",
        new String[] { "abc", "abcdef", "abcdef123456", "def", "123", "123456", "456" },

    // ...
    /* analyzer that uses whitespace + wdf */
    Analyzer a = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String field, Reader reader) {
        Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
        return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, null));
      }
    };
   
    assertAnalyzesTo(a, "abc-def-123-456",
        new String[] { "abc-def-123-456", "abc", "abcdef", "abcdef123456", "def", "123", "123456", "456" },
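The only difference from the previous excerpt is the expected output: the unsplit original "abc-def-123-456" now appears first, the signature of adding WordDelimiterFilter.PRESERVE_ORIGINAL to flags.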

      // ...
      Analyzer a = new Analyzer() {
       
        @Override
        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
          Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, protectedWords));
        }
      };
      // TODO: properly support positionLengthAttribute
      checkRandomData(random(), a, 1000*RANDOM_MULTIPLIER, 20, false, false);
    }

      // ...
      Analyzer a = new Analyzer() {
       
        @Override
        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
          Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(TEST_VERSION_CURRENT, tokenizer, flags, protectedWords));
        }
      };
      // TODO: properly support positionLengthAttribute
      checkRandomData(random(), a, 100*RANDOM_MULTIPLIER, 8192, false, false);
    }
