Package org.apache.lucene.index.memory

Examples of org.apache.lucene.index.memory.MemoryIndex$MemoryIndexReader$MemoryTermsEnum
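All of the snippets below share one pattern: analyze a single document into an in-memory MemoryIndex, call createSearcher(), and work with the resulting single-segment reader, whose term iteration is what the MemoryTermsEnum inner class implements. As a minimal, self-contained sketch of that pattern (assuming a recent Lucene with the memory and analyzers-common modules on the classpath; the field name, text, and query terms are illustrative only):

  import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
  import org.apache.lucene.index.Term;
  import org.apache.lucene.index.memory.MemoryIndex;
  import org.apache.lucene.search.TermQuery;

  MemoryIndex index = new MemoryIndex();
  // Analyze and index one document entirely in RAM.
  index.addField("content", "lucene memory index example", new WhitespaceAnalyzer());
  // search() scores that one document against the query and
  // returns 0.0f when it does not match.
  float score = index.search(new TermQuery(new Term("content", "memory")));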


      if (wrapToCaching && !(tokenStream instanceof CachingTokenFilter)) {
        assert !cachedTokenStream;
        // Wrap once in a CachingTokenFilter so the stream can be replayed,
        // and cap analysis at maxDocCharsToAnalyze.
        tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
        cachedTokenStream = true;
      }
      final MemoryIndex indexer = new MemoryIndex(true);
      indexer.addField(DelegatingAtomicReader.FIELD_NAME, tokenStream);
      // Rewind the cached stream so it can be consumed again later.
      tokenStream.reset();
      final IndexSearcher searcher = indexer.createSearcher();
      // MemoryIndex exposes exactly one atomic (leaf) reader context.
      internalReader = new DelegatingAtomicReader(((AtomicReaderContext) searcher.getTopReaderContext()).reader());
    }
    return internalReader.getContext();
  }
View Full Code Here


  }

  // Lazily builds a single-document MemoryIndex for the given field and
  // caches its reader so repeated lookups reuse the same in-memory index.
  private IndexReader getReaderForField(String field) {
    IndexReader reader = (IndexReader) readers.get(field);
    if (reader == null) {
      MemoryIndex indexer = new MemoryIndex();
      indexer.addField(field, cachedTokenFilter);
      IndexSearcher searcher = indexer.createSearcher();
      reader = searcher.getIndexReader();
      readers.put(field, reader);
    }
    return reader;
  }
View Full Code Here

      // Wrap the stream so it can be replayed for each field.
      tokenStream = new CachingTokenFilter(tokenStream);
      cachedTokenStream = true;
    }
    IndexReader reader = (IndexReader) readers.get(field);
    if (reader == null) {
      MemoryIndex indexer = new MemoryIndex();
      indexer.addField(field, tokenStream);
      // Rewind the caching filter so the next caller can consume it again.
      tokenStream.reset();
      IndexSearcher searcher = indexer.createSearcher();
      reader = searcher.getIndexReader();
      readers.put(field, reader);
    }

    return reader;
View Full Code Here

      // Cache the stream and truncate analysis after maxDocCharsToAnalyze characters.
      tokenStream = new CachingTokenFilter(new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
      cachedTokenStream = true;
    }
    IndexReader reader = readers.get(field);
    if (reader == null) {
      MemoryIndex indexer = new MemoryIndex();
      indexer.addField(field, new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
      // Rewind the caching filter so it can be replayed later.
      tokenStream.reset();
      IndexSearcher searcher = indexer.createSearcher();
      reader = searcher.getIndexReader();
      readers.put(field, reader);
    }

    return reader;
View Full Code Here

        mltQuery.setMinimumShouldMatch("100%");
        mltQuery.setMinWordLen(0);
        mltQuery.setMinDocFreq(0);

        // one document has all values
        MemoryIndex index = new MemoryIndex();
        index.addField("name.first", "apache lucene", new WhitespaceAnalyzer());
        index.addField("name.last", "1 2 3 4", new WhitespaceAnalyzer());

        // two clauses, one for items and one for like_text if set
        BooleanQuery luceneQuery = (BooleanQuery) mltQuery.rewrite(index.createSearcher().getIndexReader());
        BooleanClause[] clauses = luceneQuery.getClauses();

        // check for items
        int minNumberShouldMatch = ((BooleanQuery) (clauses[0].getQuery())).getMinimumNumberShouldMatch();
        assertThat(minNumberShouldMatch, is(4));
View Full Code Here

            return likeTexts.toArray(Fields.EMPTY_ARRAY);
        }
    }

    /* Index the same text under every requested field name in one throwaway MemoryIndex. */
    private static Fields generateFields(String[] fieldNames, String text) throws IOException {
        MemoryIndex index = new MemoryIndex();
        for (String fieldName : fieldNames) {
            index.addField(fieldName, text, new WhitespaceAnalyzer());
        }
        return MultiFields.getFields(index.createSearcher().getIndexReader());
    }
View Full Code Here
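The same read-back idea can be used to walk individual terms. A hedged sketch against the Lucene 5.x-7.x MultiFields API used above (field name and text are illustrative); the TermsEnum returned by terms.iterator() is where MemoryIndex's MemoryTermsEnum appears in practice:

  MemoryIndex index = new MemoryIndex(true);
  index.addField("title", "memory index term vectors", new WhitespaceAnalyzer());
  IndexReader reader = index.createSearcher().getIndexReader();
  Fields fields = MultiFields.getFields(reader);
  for (String fieldName : fields) {
    Terms terms = fields.terms(fieldName);
    TermsEnum termsEnum = terms.iterator();
    BytesRef term;
    // next() returns null once the (sorted) terms are exhausted.
    while ((term = termsEnum.next()) != null) {
      System.out.println(fieldName + " -> " + term.utf8ToString());
    }
  }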

    public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
        IndexReader[] memoryIndices = new IndexReader[parsedDocument.docs().size()];
        List<ParseContext.Document> docs = parsedDocument.docs();
        int rootDocIndex = docs.size() - 1;
        assert rootDocIndex > 0;
        MemoryIndex rootDocMemoryIndex = null;
        for (int i = 0; i < docs.size(); i++) {
            ParseContext.Document d = docs.get(i);
            MemoryIndex memoryIndex;
            if (rootDocIndex == i) {
                // The last doc is always the root doc; since it is usually the biggest document,
                // it makes sense to reuse the cached MemoryIndex for it.
                memoryIndex = rootDocMemoryIndex = cache.get();
            } else {
                memoryIndex = new MemoryIndex(true);
            }
            memoryIndices[i] = indexDoc(d, parsedDocument.analyzer(), memoryIndex).createSearcher().getIndexReader();
        }
        MultiReader mReader = new MultiReader(memoryIndices, true);
        try {
View Full Code Here
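For reference, a condensed sketch of the MultiReader composition performed above, with illustrative field names and documents; passing closeSubReaders = true hands ownership of the per-document readers to the MultiReader:

  MemoryIndex first = new MemoryIndex(true);
  first.addField("body", "first nested document", new WhitespaceAnalyzer());
  MemoryIndex second = new MemoryIndex(true);
  second.addField("body", "second nested document", new WhitespaceAnalyzer());
  IndexReader[] subReaders = new IndexReader[] {
      first.createSearcher().getIndexReader(),
      second.createSearcher().getIndexReader()
  };
  // One logical multi-document view over the per-document in-memory indices.
  MultiReader multiReader = new MultiReader(subReaders, true);
  IndexSearcher searcher = new IndexSearcher(multiReader);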

    }

    private Fields generateTermVectors(Collection<GetField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer)
            throws IOException {
        /* store document in memory index */
        MemoryIndex index = new MemoryIndex(withOffsets);
        for (GetField getField : getFields) {
            String field = getField.getName();
            Analyzer analyzer = getAnalyzerAtField(field, perFieldAnalyzer);
            for (Object text : getField.getValues()) {
                index.addField(field, text.toString(), analyzer);
            }
        }
        /* and read vectors from it */
        return MultiFields.getFields(index.createSearcher().getIndexReader());
    }
View Full Code Here

        this.cache = cache;
    }

    @Override
    public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
        MemoryIndex memoryIndex = cache.get();
        for (IndexableField field : parsedDocument.rootDoc().getFields()) {
            // Skip the uid field when it is not indexed; every other field is analyzed below.
            if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) {
                continue;
            }
            try {
                // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
                // like the indexer does
                TokenStream tokenStream = field.tokenStream(parsedDocument.analyzer(), null);
                if (tokenStream != null) {
                    memoryIndex.addField(field.name(), tokenStream, field.boost());
                }
            } catch (IOException e) {
                throw new ElasticsearchException("Failed to create token stream", e);
            }
        }
View Full Code Here
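A note on the cache.get() call above: MemoryIndex instances are reusable. A typical pattern (sketched here with illustrative names) keeps one instance per thread and clears it with reset() between documents:

  MemoryIndex memoryIndex = new MemoryIndex(true);
  memoryIndex.addField("field", "some document text", new WhitespaceAnalyzer());
  // ... run queries against memoryIndex.createSearcher() ...
  // reset() removes all indexed fields so the instance can be reused.
  memoryIndex.reset();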

