Package org.apache.lucene.index

Examples of org.apache.lucene.index.LeafReaderContext
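The snippets on this page are excerpts from Elasticsearch test and server code. As a minimal, self-contained sketch of where a LeafReaderContext comes from (assuming a Lucene 5+ style API and a placeholder index path, not taken from any excerpt below): every IndexReader exposes its segments as a list of LeafReaderContext objects via leaves().

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LeafReaderContextTour {
    public static void main(String[] args) throws Exception {
        // Hypothetical index path; any existing index works.
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
            for (LeafReaderContext leaf : reader.leaves()) {
                // ord: position of the segment in the list; docBase: offset of
                // its doc ids within the composite reader's global id space.
                System.out.println("ord=" + leaf.ord + " docBase=" + leaf.docBase
                        + " maxDoc=" + leaf.reader().maxDoc());
            }
        }
    }
}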


                d.add(new StringField("high_freq", "5", Field.Store.NO));
            }
            writer.addDocument(d);
        }
        logger.debug(hundred + " " + ten + " " + five);
        // Merge down to a single segment so refreshReader() returns one leaf.
        writer.forceMerge(1, true);
        LeafReaderContext context = refreshReader();
        String[] formats = new String[] { "fst", "paged_bytes" };
        for (String format : formats) {
            {
                // Reload field data from scratch for each format under test.
                ifdService.clear();
                FieldDataType fieldDataType = new FieldDataType("string", ImmutableSettings.builder().put("format", format)

    @Test
    public void testDeletedDocs() throws Exception {
        add2SingleValuedDocumentsAndDeleteOneOfThem();
        IndexFieldData indexFieldData = getForField("value");
        LeafReaderContext readerContext = refreshReader();
        AtomicFieldData fieldData = indexFieldData.load(readerContext);
        SortedBinaryDocValues values = fieldData.getBytesValues();
        // maxDoc() still counts the deleted document; its values remain
        // visible in field data until the segment is merged away.
        for (int i = 0; i < readerContext.reader().maxDoc(); ++i) {
            values.setDocument(i);
            assertThat(values.count(), greaterThanOrEqualTo(1));
        }
    }
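The test above walks doc ids up to maxDoc(), which includes deleted documents. As a hedged sketch (not part of the excerpt), per-leaf live docs are the usual way to tell live slots from deleted ones; countLiveDocs is a hypothetical helper:

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;

static int countLiveDocs(LeafReaderContext context) {
    LeafReader reader = context.reader();
    Bits liveDocs = reader.getLiveDocs(); // null means the segment has no deletions
    int live = 0;
    for (int docId = 0; docId < reader.maxDoc(); docId++) {
        if (liveDocs == null || liveDocs.get(docId)) {
            live++;
        }
    }
    return live;
}

In practice reader.numDocs() returns the live count directly; the loop only spells out the Bits contract that the excerpt's maxDoc() iteration relies on.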

    @Test
    public void testSingleValueAllSet() throws Exception {
        fillSingleValueAllSet();
        IndexFieldData indexFieldData = getForField("value");
        LeafReaderContext readerContext = refreshReader();
        AtomicFieldData fieldData = indexFieldData.load(readerContext);
        assertThat(fieldData.ramBytesUsed(), greaterThan(0L));

        SortedBinaryDocValues bytesValues = fieldData.getBytesValues();

        bytesValues.setDocument(0);
        assertThat(bytesValues.count(), equalTo(1));
        assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(two())));
        bytesValues.setDocument(1);
        assertThat(bytesValues.count(), equalTo(1));
        assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(one())));
        bytesValues.setDocument(2);
        assertThat(bytesValues.count(), equalTo(1));
        assertThat(bytesValues.valueAt(0), equalTo(new BytesRef(three())));

        assertValues(bytesValues, 0, two());
        assertValues(bytesValues, 1, one());
        assertValues(bytesValues, 2, three());

        IndexSearcher searcher = new IndexSearcher(readerContext.reader());
        TopFieldDocs topDocs;

        topDocs = searcher.search(new MatchAllDocsQuery(), 10,
                new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null))));
        assertThat(topDocs.totalHits, equalTo(3));
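The sort above goes through Elasticsearch's comparatorSource. In plain Lucene, an analogous sorted search over a doc-values field might look like the following sketch; the field name "value" mirrors the test, and SortField.Type.STRING assumes the field was indexed with sorted doc values:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;

static TopFieldDocs sortedByValue(IndexReader reader) throws IOException {
    // Ascending sort on the "value" field's doc values, top 10 hits.
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));
    return new IndexSearcher(reader).search(new MatchAllDocsQuery(), 10, sort);
}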

        // Collect per-segment lookup state (terms enum, live docs, versions).
        int numSegs = 0;
        boolean hasDeletions = false;
        // iterate backwards to optimize for the frequently updated documents
        // which are likely to be in the last segments
        for (int i = leaves.size() - 1; i >= 0; i--) {
            LeafReaderContext readerContext = leaves.get(i);
            Fields fields = readerContext.reader().fields();
            if (fields != null) {
                Terms terms = fields.terms(UidFieldMapper.NAME);
                if (terms != null) {
                    readerContexts[numSegs] = readerContext;
                    hasPayloads[numSegs] = terms.hasPayloads();
                    termsEnums[numSegs] = terms.iterator(null);
                    assert termsEnums[numSegs] != null;
                    liveDocs[numSegs] = readerContext.reader().getLiveDocs();
                    hasDeletions |= readerContext.reader().hasDeletions();
                    versions[numSegs] = readerContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
                    numSegs++;
                }
            }
        }
        this.numSegs = numSegs;
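The loop above caches one NumericDocValues instance per segment for the version field. A minimal per-leaf read might look like this sketch; readVersion is a hypothetical helper, "_version" mirrors VersionFieldMapper.NAME from the excerpt, and the random-access get(docId) call matches the Lucene 4/5-era API used on this page (Lucene 7+ turned doc values into iterators):

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;

static long readVersion(LeafReaderContext context, int segmentDocId) throws IOException {
    NumericDocValues versions = context.reader().getNumericDocValues("_version");
    // -1 as a sentinel when the segment has no version doc values at all.
    return versions == null ? -1 : versions.get(segmentDocId);
}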

        bytesList2.add(randomBytes());
        // A multi-valued field whose three values include one duplicate.
        doc = XContentFactory.jsonBuilder().startObject()
                .startArray("field")
                .value(bytesList2.get(0)).value(bytesList2.get(1)).value(bytesList2.get(0))
                .endArray().endObject();
        d = mapper.parse("test", "4", doc.bytes());
        writer.addDocument(d.rootDoc());

        LeafReaderContext reader = refreshReader();
        IndexFieldData<?> indexFieldData = getForField("field");
        AtomicFieldData fieldData = indexFieldData.load(reader);

        SortedBinaryDocValues bytesValues = fieldData.getBytesValues();

    @Test
    public void testTermsEnum() throws Exception {
        fillExtendedMvSet();
        LeafReaderContext atomicReaderContext = refreshReader();

        IndexOrdinalsFieldData ifd = getForField("value");
        AtomicOrdinalsFieldData afd = ifd.load(atomicReaderContext);

        TermsEnum termsEnum = afd.getOrdinalsValues().termsEnum();
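A hedged sketch of what consuming such a TermsEnum typically looks like for one leaf; dumpTerms is a hypothetical helper, and the no-argument iterator() is the newer form of the iterator(null) call seen in an excerpt above:

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

static void dumpTerms(LeafReaderContext context, String field) throws IOException {
    Terms terms = context.reader().terms(field);
    if (terms == null) {
        return; // the field is absent from this segment
    }
    TermsEnum termsEnum = terms.iterator();
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
        System.out.println(term.utf8ToString() + " docFreq=" + termsEnum.docFreq());
    }
}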

        // Map each top-level doc id to its segment and segment-local doc id.
        InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
        FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
        for (int index = 0; index < context.docIdsToLoadSize(); index++) {
            int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
            int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
            LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
            int subDocId = docId - subReaderContext.docBase;

            final InternalSearchHit searchHit;
            try {
                int rootDocId = findRootDocumentIfNested(context, subReaderContext, subDocId);
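The id arithmetic above is the standard pattern for composite readers: ReaderUtil.subIndex finds the owning leaf by its docBase, and subtracting docBase yields the segment-local id. A standalone sketch, with resolve as a hypothetical helper:

import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

static void resolve(IndexReader topLevelReader, int topLevelDocId) {
    List<LeafReaderContext> leaves = topLevelReader.leaves();
    // Binary search over the leaves' docBase values.
    int readerIndex = ReaderUtil.subIndex(topLevelDocId, leaves);
    LeafReaderContext leaf = leaves.get(readerIndex);
    int segmentDocId = topLevelDocId - leaf.docBase; // id within that segment
    System.out.println("leaf " + readerIndex + ", segment-local doc " + segmentDocId);
}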

    // The doc searcher wraps a single in-memory document: one leaf, doc id 0.
    public void initialize(Engine.Searcher docSearcher, ParsedDocument parsedDocument) {
        this.docSearcher = docSearcher;

        IndexReader indexReader = docSearcher.reader();
        LeafReaderContext atomicReaderContext = indexReader.leaves().get(0);
        lookup().setNextReader(atomicReaderContext);
        lookup().setNextDocId(0);
        lookup().source().setNextSource(parsedDocument.source());

        Map<String, SearchHitField> fields = new HashMap<>();
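Because the percolator's docSearcher holds exactly one in-memory document, leaves().get(0) and doc id 0 are safe. A hedged sketch of building such a single-segment reader with Lucene's MemoryIndex (the field name, text, and analyzer are placeholders):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.IndexSearcher;

static LeafReaderContext singleDocLeaf() {
    MemoryIndex memoryIndex = new MemoryIndex();
    memoryIndex.addField("body", "quick brown fox", new StandardAnalyzer());
    IndexSearcher searcher = memoryIndex.createSearcher();
    // Exactly one leaf; its single document has doc id 0.
    return searcher.getIndexReader().leaves().get(0);
}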

        client().admin().indices().prepareRefresh("test").get();
        XContentBuilder builder = filterBuilder.toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS);
        XContentParser parser = JsonXContent.jsonXContent.createParser(builder.bytes());
        Filter filter = indexService.queryParserService().parseInnerFilter(parser).filter();
        // Evaluate the parsed filter against the shard's first segment.
        try (Searcher searcher = indexService.shardSafe(0).acquireSearcher("test")) {
            final LeafReaderContext ctx = searcher.reader().leaves().get(0);
            DocIdSet set = filter.getDocIdSet(ctx, null);
            assertEquals(broken, DocIdSets.isBroken(set.iterator()));
        }
    }
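For completeness, a sketch of draining a per-leaf DocIdSet like the one obtained above; drain is a hypothetical helper, and the null checks reflect the contract that both the set and its iterator may be null for "matches nothing":

import java.io.IOException;

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;

static void drain(DocIdSet set) throws IOException {
    DocIdSetIterator it = set == null ? null : set.iterator();
    if (it == null) {
        return; // a null set or iterator means no document matches
    }
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
        System.out.println("matched segment-local doc " + doc);
    }
}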


