Package org.apache.lucene.index

Examples of org.apache.lucene.index.DirectoryReader
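
Before the excerpts below, here is a minimal, self-contained sketch of the basic pattern: open a DirectoryReader over an existing index, search it, and close it. It uses the Lucene 4.x-era API that the excerpts assume; the index path is a placeholder.

import java.io.File;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryReaderBasics {
  public static void main(String[] args) throws Exception {
    // Open a point-in-time, read-only view of an existing index (placeholder path).
    Directory dir = FSDirectory.open(new File("/path/to/index"));
    DirectoryReader reader = DirectoryReader.open(dir);
    try {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10);
      System.out.println(reader.numDocs() + " docs in index, showing " + hits.scoreDocs.length);
      for (ScoreDoc sd : hits.scoreDocs) {
        Document doc = searcher.doc(sd.doc);
        System.out.println(doc);
      }
    } finally {
      // The reader, not the searcher, owns the underlying index files.
      reader.close();
      dir.close();
    }
  }
}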


      indexWriter.addDocument(doc);
    }
    IOUtils.close(indexWriter, taxoWriter);
   
    // test the multi iterator
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
    CategoryListIterator[] iterators = new CategoryListIterator[numDimensions];
    for (int i = 0; i < iterators.length; i++) {
      CategoryListParams clp = indexingParams.getCategoryListParams(new CategoryPath(dimensions[i]));
      IntDecoder decoder = clp.createEncoder().createMatchingDecoder();
      iterators[i] = new DocValuesCategoryListIterator(clp.field, decoder);
    }
    MultiCategoryListIterator cli = new MultiCategoryListIterator(iterators);
    for (AtomicReaderContext context : indexReader.leaves()) {
      assertTrue("failed to init multi-iterator", cli.setNextReader(context));
      IntsRef ordinals = new IntsRef();
      final int maxDoc = context.reader().maxDoc();
      for (int i = 0; i < maxDoc; i++) {
        cli.getOrdinals(i, ordinals);


      System.out.println();
    } else {
      assertTrue(memory.getMemorySize() > 0L);
    }
    AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
    DirectoryReader competitor = DirectoryReader.open(ramdir);
    duellReaders(competitor, reader);
    IOUtils.close(reader, competitor);
    assertAllQueries(memory, ramdir, analyzer);
    ramdir.close();   
  }

      writer.addDocument(doc);
      writer.close();
      for (IndexableField field : doc.getFields()) {
          memory.addField(field.name(), ((Field)field).stringValue(), mockAnalyzer);
      }
      DirectoryReader competitor = DirectoryReader.open(dir);
      AtomicReader memIndexReader = (AtomicReader) memory.createSearcher().getIndexReader();
      duellReaders(competitor, memIndexReader);
      IOUtils.close(competitor, memIndexReader);
      memory.reset();
      dir.close();
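Both MemoryIndex excerpts above "duel" an in-memory reader against a DirectoryReader opened over an equivalent on-disk index. A condensed sketch of that setup, assuming a Lucene 4.x-era MemoryIndex and a throwaway RAMDirectory (field name, text, and the Version constant are illustrative):

    Analyzer analyzer = new KeywordAnalyzer();
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_46, analyzer));
    Document doc = new Document();
    doc.add(new TextField("body", "hello world", Field.Store.NO));
    writer.addDocument(doc);
    writer.close();

    MemoryIndex memory = new MemoryIndex();
    memory.addField("body", "hello world", analyzer); // same content, held only in RAM

    DirectoryReader diskReader = DirectoryReader.open(dir);
    AtomicReader memReader = (AtomicReader) memory.createSearcher().getIndexReader();
    // Both views should expose the same single document.
    assertEquals(diskReader.numDocs(), memReader.numDocs());
    IOUtils.close(diskReader, memReader);
    dir.close();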

    writer.commit();
    serverReplicator.publish(new IndexRevision(writer));
  }
 
  private void reopenReader() throws IOException {
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    assertNotNull(newReader);
    reader.close();
    reader = newReader;
  }
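The reopenReader helper above is test code, so it asserts that openIfChanged produced a new reader. In normal use DirectoryReader.openIfChanged returns null when the index has not changed since the reader was opened, so the usual pattern checks for that first; a minimal sketch, assuming a DirectoryReader field named reader:

  private void maybeReopen() throws IOException {
    // openIfChanged returns null if the current reader is already up to date.
    DirectoryReader newReader = DirectoryReader.openIfChanged(reader);
    if (newReader != null) {
      DirectoryReader oldReader = reader;
      reader = newReader;
      oldReader.close(); // release the superseded reader after swapping in the new one
    }
  }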

    }
   
    taxonomyWriter.close();
    iw.close();
   
    DirectoryReader r = DirectoryReader.open(indexDir);
    DirectoryTaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);

    ValueSource valueSource = new LongFieldSource("price");
    FacetSearchParams fsp = new FacetSearchParams(new SumValueSourceFacetRequest(new CategoryPath("a"), 10, valueSource, false));
    FacetsCollector fc = FacetsCollector.create(fsp, r, taxo);

    }
   
    taxonomyWriter.close();
    iw.close();
   
    DirectoryReader r = DirectoryReader.open(indexDir);
    DirectoryTaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);

    ValueSource valueSource = new ValueSource() {
      @Override
      public FunctionValues getValues(@SuppressWarnings("rawtypes") Map context, AtomicReaderContext readerContext) throws IOException {

    }
   
    taxonomyWriter.close();
    iw.close();
   
    DirectoryReader r = DirectoryReader.open(indexDir);
    DirectoryTaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);

    ValueSource valueSource = new LongFieldSource("price");
    FacetSearchParams fsp = new FacetSearchParams(fip, new SumValueSourceFacetRequest(new CategoryPath("a"), 10, valueSource, false));
    FacetsCollector fc = FacetsCollector.create(fsp, r, taxo);

    taxoWriter.close();
  }

  /** User runs a query and aggregates facets. */
  private List<FacetResult> search() throws IOException, ParseException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    // Aggregate categories by an expression that combines the document's score
    // and its popularity field
    Expression expr = JavascriptCompiler.compile("_score * sqrt(popularity)");
    SimpleBindings bindings = new SimpleBindings();
    bindings.add(new SortField("_score", SortField.Type.SCORE)); // the score of the document
    bindings.add(new SortField("popularity", SortField.Type.LONG)); // the value of the 'popularity' field

    FacetSearchParams fsp = new FacetSearchParams(
        new SumValueSourceFacetRequest(new CategoryPath("A"), 10, expr.getValueSource(bindings), true));

    // Aggregates the facet values
    FacetsCollector fc = FacetsCollector.create(fsp, searcher.getIndexReader(), taxoReader);

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query, and use MultiCollector to
    // wrap collecting the "normal" hits and also facets:
    searcher.search(new MatchAllDocsQuery(), fc);

    // Retrieve results
    List<FacetResult> facetResults = fc.getFacetResults();
   
    indexReader.close();
    taxoReader.close();
   
    return facetResults;
  }

    taxoWriter.close();
  }

  /** User runs a query and aggregates facets by summing their association values. */
  private List<FacetResult> sumAssociations() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
   
    CategoryPath tags = new CategoryPath("tags");
    CategoryPath genre = new CategoryPath("genre");
    FacetSearchParams fsp = new FacetSearchParams(new SumIntAssociationFacetRequest(tags, 10),
        new SumFloatAssociationFacetRequest(genre, 10));
    FacetsCollector fc = FacetsCollector.create(fsp, indexReader, taxoReader);
   
    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query, and use MultiCollector to
    // wrap collecting the "normal" hits and also facets:
    searcher.search(new MatchAllDocsQuery(), fc);
   
    // Retrieve results
    List<FacetResult> facetResults = fc.getFacetResults();
   
    indexReader.close();
    taxoReader.close();
   
    return facetResults;
  }

    taxoWriter.close();
  }

  /** User runs a query and counts facets. */
  private List<FacetResult> search() throws IOException {
    DirectoryReader indexReader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

    // Count both "Publish Date" and "Author" dimensions
    FacetSearchParams fsp = new FacetSearchParams(
        new CountFacetRequest(new CategoryPath("Publish Date"), 10),
        new CountFacetRequest(new CategoryPath("Author"), 10));

    // Aggregates the facet counts
    FacetsCollector fc = FacetsCollector.create(fsp, searcher.getIndexReader(), taxoReader);

    // MatchAllDocsQuery is for "browsing" (counts facets
    // for all non-deleted docs in the index); normally
    // you'd use a "normal" query, and use MultiCollector to
    // wrap collecting the "normal" hits and also facets:
    searcher.search(new MatchAllDocsQuery(), fc);

    // Retrieve results
    List<FacetResult> facetResults = fc.getFacetResults();
   
    indexReader.close();
    taxoReader.close();
   
    return facetResults;
  }
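The comments in the facet examples above point out that MatchAllDocsQuery is only for browsing; with a real query you would typically wrap the hit collector and the FacetsCollector in a MultiCollector so a single search pass gathers both the top hits and the facet aggregates. A short sketch of that wrapping, reusing the searcher and fc from the example above and assuming some real query q:

    TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, true);
    // One search pass fills both collectors: ranked hits and facet aggregates.
    searcher.search(q, MultiCollector.wrap(topDocs, fc));
    TopDocs hits = topDocs.topDocs();
    List<FacetResult> facetResults = fc.getFacetResults();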
