Package org.apache.lucene.analysis.core

Examples of org.apache.lucene.analysis.core.KeywordAnalyzer


         TopDocs topDoc = null;
         if ( text.indexOf( '-' ) == -1 ) {
            q = new TermQuery( new Term( iv_lookupFieldName, text ) );
            topDoc = iv_searcher.search( q, iv_maxHits );
         } else // needed the KeywordAnalyzer for situations where the hyphen was included in the f-word
            final QueryParser query = new QueryParser( Version.LUCENE_40, iv_lookupFieldName, new KeywordAnalyzer() );
            try {
               //CTAKES-63 - I believe all of the chars in the str token should be escaped to avoid issues such as a token ending with ']'
               //topDoc = iv_searcher.search(query.parse(text.replace('-', ' ')), iv_maxHits);
               final String escaped = QueryParserBase.escape( text.replace( '-', ' ' ) );
               topDoc = iv_searcher.search( query.parse( escaped ), iv_maxHits );
View Full Code Here


                    LOG.error("Invalid range index configuration: " + e.getMessage());
                }
            }
        }
        // default analyzer
        analyzer = new KeywordAnalyzer();
    }
View Full Code Here

        this.docDef = def ;

        // create the analyzer as a wrapper that uses KeywordAnalyzer for
        // entity and graph fields and StandardAnalyzer for all other
        Map<String, Analyzer> analyzerPerField = new HashMap<>() ;
        analyzerPerField.put(def.getEntityField(), new KeywordAnalyzer()) ;
        if ( def.getGraphField() != null )
            analyzerPerField.put(def.getGraphField(), new KeywordAnalyzer()) ;
       
        for (String field : def.fields()) {
          Analyzer analyzer = def.getAnalyzer(field);
          if (analyzer != null) {
            analyzerPerField.put(field, analyzer);
View Full Code Here

        .
    */

    @Override
    public Analyzer open(Assembler a, Resource root, Mode mode) {
      return new KeywordAnalyzer();
    }
View Full Code Here

        this.docDef = def ;

        // create the analyzer as a wrapper that uses KeywordAnalyzer for
        // entity and graph fields and StandardAnalyzer for all other
        Map<String, Analyzer> analyzerPerField = new HashMap<String, Analyzer>() ;
        analyzerPerField.put(def.getEntityField(), new KeywordAnalyzer()) ;
        if ( def.getGraphField() != null )
            analyzerPerField.put(def.getGraphField(), new KeywordAnalyzer()) ;
        this.analyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(VER), analyzerPerField) ;

        // force creation of the index if it don't exist
        // otherwise if we get a search before data is written we get an
        // exception
View Full Code Here

        Analyzer geoNetworkAnalyzer = SearchManager.createGeoNetworkAnalyzer(stopwords, ignoreChars);
    Map<String, Analyzer> analyzers = new HashMap<String, Analyzer>();
    analyzers.put(LuceneIndexField.UUID, new GeoNetworkAnalyzer());
    analyzers.put(LuceneIndexField.PARENTUUID, new GeoNetworkAnalyzer());
    analyzers.put(LuceneIndexField.OPERATESON, new GeoNetworkAnalyzer());
    analyzers.put(LuceneIndexField.SUBJECT, new KeywordAnalyzer());
   
    pfaw = new PerFieldAnalyzerWrapper(geoNetworkAnalyzer, analyzers );
        return pfaw;
  }
View Full Code Here

    public void before() throws Exception {
        Map<String, Analyzer> analyzers = new HashMap<String, Analyzer>();
        analyzers.put("_uuid", new GeoNetworkAnalyzer());
        analyzers.put("parentUuid", new GeoNetworkAnalyzer());
        analyzers.put("operatesOn", new GeoNetworkAnalyzer());
        analyzers.put("subject", new KeywordAnalyzer());

        _analyzer = new PerFieldAnalyzerWrapper(new GeoNetworkAnalyzer(), analyzers);

        final String configFile = "/WEB-INF/config-lucene.xml";
        final String appDir = new File(LuceneQueryTest.class.getResource(configFile).getFile()).getParentFile().getParent()+"/";
View Full Code Here

   * @return the parsed query
   */
  private Query parseTokens(String fieldName, String value) {
    BooleanQuery searchQuery = new BooleanQuery();
    if (value != null) {
      Analyzer analyzer = new KeywordAnalyzer();

      try {
        TokenStream tokenStream =
          analyzer.tokenStream(fieldName, new StringReader(value));
        tokenStream.reset();
        CharTermAttribute attr =
          tokenStream.getAttribute(CharTermAttribute.class);

        while (tokenStream.incrementToken()) {
View Full Code Here

         TopDocs topDoc = null;
         if ( text.indexOf( '-' ) == -1 ) {
            q = new TermQuery( new Term( iv_lookupFieldName, text ) );
            topDoc = iv_searcher.search( q, iv_maxHits );
         } else // needed the KeywordAnalyzer for situations where the hyphen was included in the f-word
            final QueryParser query = new QueryParser( Version.LUCENE_40, iv_lookupFieldName, new KeywordAnalyzer() );
            try {
               //CTAKES-63 - I believe all of the chars in the str token should be escaped to avoid issues such as a token ending with ']'
               //topDoc = iv_searcher.search(query.parse(text.replace('-', ' ')), iv_maxHits);
               final String escaped = QueryParserBase.escape( text.replace( '-', ' ' ) );
               topDoc = iv_searcher.search( query.parse( escaped ), iv_maxHits );
View Full Code Here

        final Map<String, Analyzer> analyzerMap = Maps.newHashMap();
        analyzerMap.put("title", hebrewAnalyzer);
        analyzerMap.put("topic", hebrewAnalyzer);
        analyzerMap.put("parent_title", hebrewAnalyzer);
        analyzerMap.put("replies.text", hebrewAnalyzer);
        perFieldAnalyzerWrapper = new PerFieldAnalyzerWrapper(new KeywordAnalyzer(), analyzerMap);
    }
View Full Code Here

TOP

Related Classes of org.apache.lucene.analysis.core.KeywordAnalyzer

Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.