Package org.apache.lucene.index

Examples of org.apache.lucene.index.AtomicReader
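
The snippets below are collected from the Lucene code base and its tests. Together they show the typical AtomicReader patterns: obtaining the per-segment reader from an AtomicReaderContext, flattening a composite reader into a single atomic view with SlowCompositeReaderWrapper, and pulling Terms, TermsEnum, and DocsEnum instances from a leaf.

As a warm-up, here is a minimal, self-contained sketch of the core pattern. It assumes the Lucene 4.x API (AtomicReader was renamed to LeafReader in Lucene 5.0); the Directory and field name are placeholders:

import java.io.IOException;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;

public class AtomicReaderExample {
  // Walk the atomic (per-segment) leaves of a DirectoryReader and print
  // every term of one field together with its document frequency.
  static void listTerms(Directory dir, String field) throws IOException {
    DirectoryReader reader = DirectoryReader.open(dir);
    try {
      for (AtomicReaderContext ctx : reader.leaves()) {
        AtomicReader leaf = ctx.reader();
        Terms terms = leaf.terms(field); // null if the segment has no such field
        if (terms == null) {
          continue;
        }
        TermsEnum te = terms.iterator(null); // no previous enum to reuse
        while (te.next() != null) {
          System.out.println(te.term().utf8ToString() + " docFreq=" + te.docFreq());
        }
      }
    } finally {
      reader.close();
    }
  }
}

The first collected snippet appears to be a Weight.scorer() implementation for a multi-phrase query. It resolves the segment's AtomicReader from the context and returns a null Scorer when the field does not exist in that segment: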


    @Override
    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
        boolean topScorer, Bits acceptDocs) throws IOException {
      assert !termArrays.isEmpty();
      final AtomicReader reader = context.reader();
      final Bits liveDocs = acceptDocs;
     
      PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()];

      // The field may be absent from this segment; without terms there is
      // nothing to score.
      final Terms fieldTerms = reader.terms(field);
      if (fieldTerms == null) {
        return null;
      }

      // Reuse single TermsEnum below:
      // ... (snippet truncated)
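
The next snippet overrides getMergeReaders() (this looks like Lucene's SortingMergePolicy): when more than one reader participates in a merge, the readers are combined into a MultiReader and flattened into a single atomic view with SlowCompositeReaderWrapper.wrap():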


    @Override
    public List<AtomicReader> getMergeReaders() throws IOException {
      if (unsortedReaders == null) {
        unsortedReaders = super.getMergeReaders();
        final AtomicReader atomicView;
        if (unsortedReaders.size() == 1) {
          // A single reader is already atomic; use it directly.
          atomicView = unsortedReaders.get(0);
        } else {
          // Combine all merge participants, then flatten the composite
          // view into one atomic reader.
          final IndexReader multiReader = new MultiReader(unsortedReaders.toArray(new AtomicReader[unsortedReaders.size()]));
          atomicView = SlowCompositeReaderWrapper.wrap(multiReader);
          // ... (snippet truncated)
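
This test builds a random index, then walks every leaf of a freshly opened DirectoryReader and requests a DocsEnum for each term of the "body" field without ever passing a previous enum for reuse; the IdentityHashMap confirms that a distinct DocsEnum instance was created per term: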

    createRandomIndex(numdocs, writer, random());
    writer.commit();

    DirectoryReader open = DirectoryReader.open(dir);
    for (AtomicReaderContext ctx : open.leaves()) {
      AtomicReader indexReader = ctx.reader();
      Terms terms = indexReader.terms("body");
      TermsEnum iterator = terms.iterator(null);
      IdentityHashMap<DocsEnum, Boolean> enums = new IdentityHashMap<DocsEnum, Boolean>();
      Bits.MatchNoBits bits = new Bits.MatchNoBits(indexReader.maxDoc());
      while (iterator.next() != null) {
        // Reuse is never requested (second argument is null), so the codec
        // must hand out a fresh DocsEnum for every term.
        DocsEnum docs = iterator.docs(
            random().nextBoolean() ? bits : new Bits.MatchNoBits(indexReader.maxDoc()),
            null,
            random().nextBoolean() ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
        enums.put(docs, true);
      }
      }
     
      assertEquals(terms.size(), enums.size());
    }
    // ... (snippet truncated)
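
Next, a test helper that picks a random leaf reader and, if the field and term exist there, positions a TermsEnum with seekExact() to produce a DocsEnum (it occasionally returns null up front to exercise the caller's null handling):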

 
  public DocsEnum randomDocsEnum(String field, BytesRef term, List<AtomicReaderContext> readers, Bits bits) throws IOException {
    if (random().nextInt(10) == 0) {
      return null;
    }
    AtomicReader indexReader = readers.get(random().nextInt(readers.size())).reader();
    Terms terms = indexReader.terms(field);
    if (terms == null) {
      return null;
    }
    TermsEnum iterator = terms.iterator(null);
    if (iterator.seekExact(term)) {
      // ... (snippet truncated)
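
The following verification routine compares, pairwise and per leaf, the terms of fields that were indexed with progressively richer postings data (docs only, frequencies, positions, offsets, and payloads):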

  private void verify(Directory dir) throws Exception {
    DirectoryReader ir = DirectoryReader.open(dir);
    for (AtomicReaderContext leaf : ir.leaves()) {
      AtomicReader leafReader = leaf.reader();
      assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
      assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
      assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
      assertTerms(leafReader.terms("field5payloadsFixed"), leafReader.terms("field6payloadsVariable"), true);
      assertTerms(leafReader.terms("field6payloadsVariable"), leafReader.terms("field7payloadsFixedOffsets"), true);
      assertTerms(leafReader.terms("field7payloadsFixedOffsets"), leafReader.terms("field8payloadsVariableOffsets"), true);
    }
    ir.close();
  }
  // ... (snippet truncated)
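
This test exercises a BlockJoinSorter: a cached filter marks the parent documents, NumericDocValues supply the sort keys, and two Sorter.DocComparator implementations, one asserting it only ever sees parents and one only children, are returned from the sorter's factory methods: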

    writer.forceMerge(1);
    final DirectoryReader indexReader = writer.getReader();
    writer.close();

    final AtomicReader reader = getOnlySegmentReader(indexReader);
    final Filter parentsFilter = new FixedBitSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("parent", "true"))));
    final FixedBitSet parentBits = (FixedBitSet) parentsFilter.getDocIdSet(reader.getContext(), null);

    final NumericDocValues parentValues = reader.getNumericDocValues("parent_val");
    final Sorter.DocComparator parentComparator = new Sorter.DocComparator() {
      @Override
      public int compare(int docID1, int docID2) {
        assertTrue(parentBits.get(docID1));
        assertTrue(parentBits.get(docID2));
        final long v1 = parentValues.get(docID1);
        final long v2 = parentValues.get(docID2);
        return v1 < v2 ? -1 : v1 == v2 ? 0 : 1;
      }
    };

    final NumericDocValues childValues = reader.getNumericDocValues("child_val");
    final Sorter.DocComparator childComparator = new Sorter.DocComparator() {
      @Override
      public int compare(int docID1, int docID2) {
        assertFalse(parentBits.get(docID1));
        assertFalse(parentBits.get(docID2));
        final long v1 = childValues.get(docID1);
        final long v2 = childValues.get(docID2);
        return v1 < v2 ? -1 : v1 == v2 ? 0 : 1;
      }
    };

    final Sorter sorter = new BlockJoinSorter(parentsFilter) {
     
      @Override
      public String getID() {
        return "Dummy";
      }
     
      @Override
      protected DocComparator getParentComparator(AtomicReader r) {
        assertEquals(reader, r);
        return parentComparator;
      }

      @Override
      protected DocComparator getChildComparator(AtomicReader r) {
        assertEquals(reader, r);
        return childComparator;
      }

    };
    final Sorter.DocMap docMap = sorter.sort(reader);
    assertEquals(reader.maxDoc(), docMap.size());

    int[] children = new int[1];
    int numChildren = 0;
    int previousParent = -1;
    for (int i = 0; i < docMap.size(); ++i) {
      // ... (snippet truncated)
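
The next two snippets come from what appears to be the classification module's test base class. The first wraps the sample index into a single AtomicReader with SlowCompositeReaderWrapper, trains the classifier on it, and checks the assigned class and score of a test document: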

  protected void checkCorrectClassification(Classifier<T> classifier, String inputDoc, T expectedResult, Analyzer analyzer, String textFieldName, String classFieldName) throws Exception {
    checkCorrectClassification(classifier, inputDoc, expectedResult, analyzer, textFieldName, classFieldName, null);
  }

  protected void checkCorrectClassification(Classifier<T> classifier, String inputDoc, T expectedResult, Analyzer analyzer, String textFieldName, String classFieldName, Query query) throws Exception {
    AtomicReader atomicReader = null;
    try {
      populateSampleIndex(analyzer);
      atomicReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader());
      classifier.train(atomicReader, textFieldName, classFieldName, analyzer, query);
      ClassificationResult<T> classificationResult = classifier.assignClass(inputDoc);
      assertNotNull(classificationResult.getAssignedClass());
      assertEquals("got an assigned class of " + classificationResult.getAssignedClass(), expectedResult, classificationResult.getAssignedClass());
      assertTrue("got a not positive score " + classificationResult.getScore(), classificationResult.getScore() > 0);
    } finally {
      if (atomicReader != null) {
        atomicReader.close();
      }
    }
  }
  // ... (snippet truncated)
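
The companion performance check trains the classifier on the wrapped AtomicReader and asserts that training finishes within two minutes: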


  protected void checkPerformance(Classifier<T> classifier, Analyzer analyzer, String classFieldName) throws Exception {
    AtomicReader atomicReader = null;
    long trainStart = System.currentTimeMillis();
    try {
      populatePerformanceIndex(analyzer);
      atomicReader = SlowCompositeReaderWrapper.wrap(indexWriter.getReader());
      classifier.train(atomicReader, textFieldName, classFieldName, analyzer);
      long trainEnd = System.currentTimeMillis();
      long trainTime = trainEnd - trainStart;
      assertTrue("training took more than 2 mins : " + trainTime / 1000 + "s", trainTime < 120000);
    } finally {
      if (atomicReader != null) {
        atomicReader.close();
      }
    }
  }
  // ... (snippet truncated)
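
This looks like a DocsEnum-reuse test for the pulsing postings format: the previous enum is passed back into docs() and docsAndPositions() on every step, and the test counts how many distinct enum instances the codec actually handed out (two are expected here):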

    doc.add(new TextField("foo", "a b b c c c d e f g g h i i j j k", Field.Store.NO));
    iw.addDocument(doc);
    DirectoryReader ir = iw.getReader();
    iw.close();
   
    AtomicReader segment = getOnlySegmentReader(ir);
    DocsEnum reuse = null;
    Map<DocsEnum,Boolean> allEnums = new IdentityHashMap<DocsEnum,Boolean>();
    TermsEnum te = segment.terms("foo").iterator(null);
    while (te.next() != null) {
      reuse = te.docs(null, reuse, DocsEnum.FLAG_NONE);
      allEnums.put(reuse, true);
    }
   
    assertEquals(2, allEnums.size());
   
    allEnums.clear();
    DocsAndPositionsEnum posReuse = null;
    te = segment.terms("foo").iterator(null);
    while (te.next() != null) {
      posReuse = te.docsAndPositions(null, posReuse);
      allEnums.put(posReuse, true);
    }
   
    // ... (snippet truncated)
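
A variant of the same reuse test, apparently against a nested/wrapped codec configuration, expects four distinct enum instances instead of two: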

    // but this seems 'good enough' for now.
    iw.addDocument(doc);
    DirectoryReader ir = iw.getReader();
    iw.close();
   
    AtomicReader segment = getOnlySegmentReader(ir);
    DocsEnum reuse = null;
    Map<DocsEnum,Boolean> allEnums = new IdentityHashMap<DocsEnum,Boolean>();
    TermsEnum te = segment.terms("foo").iterator(null);
    while (te.next() != null) {
      reuse = te.docs(null, reuse, DocsEnum.FLAG_NONE);
      allEnums.put(reuse, true);
    }
   
    assertEquals(4, allEnums.size());
   
    allEnums.clear();
    DocsAndPositionsEnum posReuse = null;
    te = segment.terms("foo").iterator(null);
    while (te.next() != null) {
      posReuse = te.docsAndPositions(null, posReuse);
      allEnums.put(posReuse, true);
    }
   
    // ... (snippet truncated)
