Package org.apache.lucene.index.memory

Examples of org.apache.lucene.index.memory.MemoryIndex$MemoryIndexReader$MemoryTermsEnum
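Before the project snippets below (which are all fragments cut from larger classes), here is a minimal, self-contained sketch of typical MemoryIndex usage. It is not taken from any of the examples on this page; it assumes a Lucene 4.x-era API in which MemoryIndex.addField(String, String, Analyzer) and MemoryIndex.search(Query) are available, and the class name, field name, and sample text are purely illustrative.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.Version;

public class MemoryIndexSketch {
  public static void main(String[] args) {
    // Analyzer used both at indexing and query time; the Version constant assumes a 4.x build.
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);

    // A MemoryIndex holds exactly one document, entirely in RAM.
    MemoryIndex index = new MemoryIndex();
    index.addField("content", "readings about salmons and other fish", analyzer);

    // Score a query against that single document; 0.0f means "no match".
    Query query = new TermQuery(new Term("content", "salmons"));
    float score = index.search(query);
    System.out.println("score=" + score);

    // For APIs that need an IndexSearcher/IndexReader (as in several snippets below),
    // index.createSearcher() exposes the same single-document index.
  }
}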


      throw new IllegalArgumentException("textAnalyzer must not be null");
    if (queryAnalyzer == null)
      throw new IllegalArgumentException("queryAnalyzer must not be null");
   
    if (DEFAULT_CAPACITY <= 0) { // cache disabled?
      MemoryIndex index = new MemoryIndex();
      index.addField(FIELD_NAME, text, textAnalyzer);
      return index.search(parse(query, queryAnalyzer));
    }

    Object key = Pool.createHashKeys(new Object[] {text, query, textAnalyzer, queryAnalyzer});
    Float score = (Float) entries.get(key); // hit/miss ratio is app specific
//    Float score = null;
    if (score == null) { // cache miss
      Object qkey = Pool.createHashKeys(new Object[] {query, queryAnalyzer});
      Query luceneQuery = (Query) entries.get(qkey); // typically good hit/miss ratio
//      Query luceneQuery = null;
      if (luceneQuery == null) { // cache miss
        luceneQuery = parse(query, queryAnalyzer);
        entries.put(qkey, luceneQuery);
      }
     
      Object tkey = Pool.createHashKeys(new Object[] {text, textAnalyzer, null});
      MemoryIndex index = (MemoryIndex) entries.get(tkey);
//      MemoryIndex index = null;
      if (index == null) { // cache miss
        index = new MemoryIndex();
        index.addField(FIELD_NAME, text, textAnalyzer);
        entries.put(tkey, index);
      }

      /*
       * TODO: Reduce the following lock scope, minimizing lock
       * contention? Though not publicly documented anywhere, with the
       * current impl, a MemoryIndex instance can actually safely have
       * multiple concurrent readers, but I'm not sure that's also true
       * for the luceneQuery instance. For the moment better safe than
       * sorry...
       */
      synchronized (luceneQuery) {
        score = new Float(index.search(luceneQuery));
      }
     
      entries.put(key, score);
    }
    return score.floatValue();
View Full Code Here


    String field = "f";
    Nodes lines = XQueryUtil.xquery(doc, path);
    System.out.println("lines=" + lines.size());
    MemoryIndex[] indexes = new MemoryIndex[lines.size()];
    for (int i=0; i < lines.size(); i++) {
      indexes[i] = new MemoryIndex();
      indexes[i].addField(field, lines.get(i).getValue(), textAnalyzer);
    }
    doc = null;   // help gc
    lines = null; // help gc
   
View Full Code Here

      tokenStream = new CachingTokenFilter(tokenStream);
      cachedTokenStream = true;
    }
    IndexReader reader = readers.get(field);
    if (reader == null) {
      MemoryIndex indexer = new MemoryIndex();
      indexer.addField(field, tokenStream);
      tokenStream.reset();
      IndexSearcher searcher = indexer.createSearcher();
      reader = searcher.getIndexReader();
      readers.put(field, reader);
    }

    return reader;
View Full Code Here

      tokenStream = new CachingTokenFilter(tokenStream);
      cachedTokenStream = true;
    }
    IndexReader reader = (IndexReader) readers.get(field);
    if (reader == null) {
      MemoryIndex indexer = new MemoryIndex();
      indexer.addField(field, tokenStream);
      tokenStream.reset();
      IndexSearcher searcher = indexer.createSearcher();
      reader = searcher.getIndexReader();
      readers.put(field, reader);
    }

    return reader;
View Full Code Here

      tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
      cachedTokenStream = true;
    }
    IndexReader reader = readers.get(field);
    if (reader == null) {
      MemoryIndex indexer = new MemoryIndex();
      indexer.addField(field, new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
      tokenStream.reset();
      IndexSearcher searcher = indexer.createSearcher();
      reader = searcher.getIndexReader();
      readers.put(field, reader);
    }

    return reader;
View Full Code Here

      if(wrapToCaching && !(tokenStream instanceof CachingTokenFilter)) {
        assert !cachedTokenStream;
        tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
        cachedTokenStream = true;
      }
      final MemoryIndex indexer = new MemoryIndex(true);
      indexer.addField(DelegatingAtomicReader.FIELD_NAME, tokenStream);
      tokenStream.reset();
      final IndexSearcher searcher = indexer.createSearcher();
      // MEM index has only atomic ctx
      internalReader = new DelegatingAtomicReader(((AtomicReaderContext)searcher.getTopReaderContext()).reader());
    }
    return internalReader.getContext();
  }
View Full Code Here

      if(wrapToCaching && !(tokenStream instanceof CachingTokenFilter)) {
        assert !cachedTokenStream;
        tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
        cachedTokenStream = true;
      }
      final MemoryIndex indexer = new MemoryIndex(true);
      indexer.addField(DelegatingAtomicReader.FIELD_NAME, tokenStream);
      tokenStream.reset();
      final IndexSearcher searcher = indexer.createSearcher();
      // MEM index has only atomic ctx
      reader = new DelegatingAtomicReader(((AtomicReaderContext)searcher.getTopReaderContext()).reader());
    }
    return reader.getContext();
  }
View Full Code Here

  @Override
  public void process(CAS aCAS)
      throws AnalysisEngineProcessException {

    // First create the index of the document text
    MemoryIndex index = new MemoryIndex();

    List fields = createDocument(aCAS).getFields();
   
    for (Iterator it = fields.iterator(); it.hasNext(); ) {
      Field field = (Field) it.next();
     
      if (field.isIndexed() && field.tokenStreamValue() != null) {
        index.addField(field.name(), field.tokenStreamValue());
      }
    }
   
    // Search all queries against the one document index
    for (SearchQuery query : searchQueryProvider.getSearchQueries(aCAS)) {

      float score = index.search(query.query());

      if (score > matchingThreshold) {
       
        // Add a FS to the CAS with the search result
        FeatureStructure searchResult = aCAS.createFS(searchResultType);
View Full Code Here

      if(wrapToCaching && !(tokenStream instanceof CachingTokenFilter)) {
        assert !cachedTokenStream;
        tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
        cachedTokenStream = true;
      }
      final MemoryIndex indexer = new MemoryIndex(true);
      indexer.addField(DelegatingAtomicReader.FIELD_NAME, tokenStream);
      tokenStream.reset();
      final IndexSearcher searcher = indexer.createSearcher();
      // MEM index has only atomic ctx
      internalReader = new DelegatingAtomicReader(((AtomicReaderContext)searcher.getTopReaderContext()).reader());
    }
    return internalReader.getContext();
  }
View Full Code Here

    }

    private static HashSet<String> performSearch(Analyzer a) throws IOException {
        HashSet<String> results = new HashSet<>();
        for (File file : getTestFiles()) {
            MemoryIndex memoryIndex = new MemoryIndex(true);
            final List<String> lines = Files.readAllLines(file.toPath(), Charset.forName("UTF-8"));

            memoryIndex.addField("title", lines.get(0), a);
            StringBuilder sb = new StringBuilder();
            for (String line : lines) {
                sb.append(line);
            }
            memoryIndex.addField("content", sb.toString(), a);

            IndexSearcher searcher = memoryIndex.createSearcher();
            ExistsCollector collector = new ExistsCollector();

            searcher.search(new TermQuery(new Term("content", "אני")), collector);
            if (collector.exists()) {
                results.add(file.getName());
View Full Code Here


