Package: org.apache.lucene.facet.taxonomy

Examples of org.apache.lucene.facet.taxonomy.TaxonomyReader$ChildrenIterator


    // NOTE(review): fragment — the enclosing test method's signature is not
    // visible here; iw, tw, dirs and iParams are presumably initialized
    // earlier in the method. Verify against the full source.
    IndexReader ir = iw.getReader();
    tw.commit();

    // prepare index reader and taxonomy.
    TaxonomyReader tr = new LuceneTaxonomyReader(dirs[0][1]);

    // prepare searcher to search against
    IndexSearcher searcher = newSearcher(ir);

    // Run the faceted search; the collector carries the facet results.
    FacetsCollector facetsCollector = performSearch(iParams, tr, ir,
        searcher);

    // Obtain facets results and hand-test them
    assertCorrectResults(facetsCollector);
    // Verify the expected category-list posting lists were written to the
    // main index ("$music" / "$literature" are category list terms).
    assertPostingListExists("$music", "music", ir);
    assertPostingListExists("$literature", "Authors", ir);

    // Close everything opened above (readers before writers here; no
    // try/finally, so a failing assertion leaks these — acceptable in tests).
    tr.close();
    ir.close();
    searcher.close();
    iw.close();
    tw.close();
    IOUtils.close(dirs[0]);
View Full Code Here


  public static List<FacetResult> searchWithFacets(Directory indexDir,
      Directory taxoDir, FacetIndexingParams iParams) throws Exception {
   
    // prepare index reader and taxonomy.
    IndexReader indexReader = IndexReader.open(indexDir);
    TaxonomyReader taxo = new LuceneTaxonomyReader(taxoDir);
   
    // Get results
    List<FacetResult> results = searchWithFacets(indexReader, taxo, iParams);
   
    // we're done, close the index reader and the taxonomy.
    indexReader.close();
    taxo.close();
    return results;
  }
View Full Code Here

    // NOTE(review): fragment — tw1, dir1..dir3 and maps[] are set up before
    // this view; confirm against the full source.
    maps[1] = new MemoryOrdinalMap();

    // Merge the taxonomies in dir2 and dir3 into the one in dir1, recording
    // the old-ordinal -> new-ordinal mapping for each merged taxonomy.
    tw1.addTaxonomies(new Directory[] { dir2, dir3 }, maps);
    tw1.close();

    TaxonomyReader tr = new LuceneTaxonomyReader(dir1);

    // Test that the merged taxonomy now contains what we expect:
    // First all the categories of the original taxonomy, in their original order:
    // NOTE(review): actual and expected are swapped relative to the usual
    // assertEquals(expected, actual) convention throughout this section —
    // harmless for passing tests, but failure messages will be inverted.
    assertEquals(tr.getPath(0).toString(), "");
    assertEquals(tr.getPath(1).toString(), "Author");
    assertEquals(tr.getPath(2).toString(), "Author/Mark Twain");
    assertEquals(tr.getPath(3).toString(), "Animals");
    assertEquals(tr.getPath(4).toString(), "Animals/Dog");
    // Then the categories new in the new taxonomy, in alphabetical order:
    assertEquals(tr.getPath(5).toString(), "Aardvarks");
    assertEquals(tr.getPath(6).toString(), "Aardvarks/Aaron");
    assertEquals(tr.getPath(7).toString(), "Aardvarks/Bob");
    assertEquals(tr.getPath(8).toString(), "Author/Rob Pike");
    assertEquals(tr.getPath(9).toString(), "Author/Zebra Smith");
    assertEquals(tr.getSize(), 10);

    // Test that the maps contain what we expect
    // (each entry maps an ordinal in the source taxonomy to its ordinal in
    // the merged taxonomy above).
    int[] map0 = maps[0].getMap();
    assertEquals(5, map0.length);
    assertEquals(0, map0[0]);
    assertEquals(1, map0[1]);
    assertEquals(8, map0[2]);
    assertEquals(5, map0[3]);
    assertEquals(7, map0[4]);

    int[] map1 = maps[1].getMap();
    assertEquals(6, map1.length);
    assertEquals(0, map1[0]);
    assertEquals(1, map1[1]);
    assertEquals(9, map1[2]);
    assertEquals(5, map1[3]);
    assertEquals(7, map1[4]);
    assertEquals(6, map1[5]);

    tr.close();
    dir1.close();
    dir2.close();
    dir3.close();
  }
View Full Code Here

    // NOTE(review): fragment — tw, ntaxonomies, dirs[] and copydirs[] come
    // from earlier in the method, outside this view.
    tw.close();

    // Check that all original categories in the main taxonomy remain
    // unchanged, and the rest of the taxonomies are completely unchanged.
    for (int i=0; i<ntaxonomies; i++) {
      TaxonomyReader tr = new LuceneTaxonomyReader(dirs[i]);
      TaxonomyReader copytr = new LuceneTaxonomyReader(copydirs[i]);
      if (i==0) {
        // The main taxonomy may only have grown during the merge.
        assertTrue(tr.getSize() >= copytr.getSize());
      } else {
        // Source taxonomies must be byte-for-byte the same size as their copies.
        assertEquals(copytr.getSize(), tr.getSize());
      }
      // Every pre-existing ordinal must still resolve to the same path.
      for (int j=0; j<copytr.getSize(); j++) {
        String expected = copytr.getPath(j).toString();
        String got = tr.getPath(j).toString();
        assertTrue("Comparing category "+j+" of taxonomy "+i+": expected "+expected+", got "+got,
            expected.equals(got));
      }
      tr.close();
      copytr.close();
    }

    // Check that all the new categories in the main taxonomy are in
    // lexicographic order. This isn't a requirement of our API, but happens
    // this way in our current implementation.
    TaxonomyReader tr = new LuceneTaxonomyReader(dirs[0]);
    TaxonomyReader copytr = new LuceneTaxonomyReader(copydirs[0]);
    if (tr.getSize() > copytr.getSize()) {
      String prev = tr.getPath(copytr.getSize()).toString();
      for (int j=copytr.getSize()+1; j<tr.getSize(); j++) {
        String n = tr.getPath(j).toString();
        assertTrue(prev.compareTo(n)<0);
        prev=n;
      }
    }
    int oldsize = copytr.getSize(); // remember for later
    tr.close();
    copytr.close();

    // Check that all the categories from other taxonomies exist in the new
    // taxonomy.
    TaxonomyReader main = new LuceneTaxonomyReader(dirs[0]);
    for (int i=1; i<ntaxonomies; i++) {
      TaxonomyReader other = new LuceneTaxonomyReader(dirs[i]);
      for (int j=0; j<other.getSize(); j++) {
        int otherord = main.getOrdinal(other.getPath(j));
        assertTrue(otherord != TaxonomyReader.INVALID_ORDINAL);
      }
      other.close();
    }

    // Check that all the new categories in the merged taxonomy exist in
    // one of the added taxonomies.
    TaxonomyReader[] others = new TaxonomyReader[ntaxonomies-1];
View Full Code Here

    // NOTE(review): fragment — the enclosing test method's signature is
    // above this view.
    // A freshly constructed FacetSearchParams should carry the default
    // indexing params and no facet requests.
    FacetSearchParams fsp = new FacetSearchParams();
    assertEquals("unexpected default facet indexing params class", DefaultFacetIndexingParams.class.getName(), fsp.getFacetIndexingParams().getClass().getName());
    assertEquals("no facet requests should be added by default", 0, fsp.getFacetRequests().size());
    // Create an empty taxonomy (writer opened and immediately closed) and
    // check partition offset/size for a taxonomy with 0 categories.
    Directory dir = newDirectory();
    new LuceneTaxonomyWriter(dir).close();
    TaxonomyReader tr = new LuceneTaxonomyReader(dir);
    assertEquals("unexpected partition offset for 0 categories", 1, PartitionsUtils.partitionOffset(fsp, 1, tr));
    assertEquals("unexpected partition size for 0 categories", 1, PartitionsUtils.partitionSize(fsp,tr));
    tr.close();
    dir.close();
  }
View Full Code Here

    // NOTE(review): fragment — fsp is declared before this view; confirm
    // against the full source.
    // Build a taxonomy with a single category "a", then verify partition
    // offset/size account for it (root + "a" => 2).
    Directory dir = newDirectory();
    TaxonomyWriter tw = new LuceneTaxonomyWriter(dir);
    tw.addCategory(new CategoryPath("a"));
    tw.commit();
    tw.close();
    TaxonomyReader tr = new LuceneTaxonomyReader(dir);
    assertEquals("unexpected partition offset for 1 categories", 2, PartitionsUtils.partitionOffset(fsp, 1, tr));
    assertEquals("unexpected partition size for 1 categories", 2, PartitionsUtils.partitionSize(fsp,tr));
    tr.close();
    dir.close();
  }
View Full Code Here

    // NOTE(review): fragment — fr (a FacetRequest) is created before this
    // view.
    Directory dir1 = newDirectory();
    Directory dir2 = newDirectory();
    // create empty indexes, so that LTR ctor won't complain about a missing index.
    new IndexWriter(dir1, new IndexWriterConfig(TEST_VERSION_CURRENT, null)).close();
    new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, null)).close();
    TaxonomyReader tr1 = new LuceneTaxonomyReader(dir1);
    TaxonomyReader tr2 = new LuceneTaxonomyReader(dir2);
    // The request must hand out a distinct results handler per taxonomy
    // reader (handlers are not shared/cached across readers).
    FacetResultsHandler frh1 = fr.createFacetResultsHandler(tr1);
    FacetResultsHandler frh2 = fr.createFacetResultsHandler(tr2);
    assertTrue("should not return the same FacetResultHandler instance for different TaxonomyReader instances", frh1 != frh2);
    tr1.close();
    tr2.close();
    dir1.close();
    dir2.close();
  }
View Full Code Here

    // in the FRH.
    // NOTE(review): fragment — the start of the enclosing test and its
    // comment are above this view.
    FacetRequest fr = new CountFacetRequest(new CategoryPath("a"), 10);
    Directory dir = newDirectory();
    // create empty indexes, so that LTR ctor won't complain about a missing index.
    new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, null)).close();
    TaxonomyReader tr = new LuceneTaxonomyReader(dir);
    FacetResultsHandler frh = fr.createFacetResultsHandler(tr);
    // Mutating the request AFTER the handler was created must not affect the
    // handler: the assertion below shows it kept the request's state (depth)
    // as of creation time.
    fr.setDepth(10);
    assertEquals(FacetRequest.DEFAULT_DEPTH, frh.getFacetRequest().getDepth());
    tr.close();
    dir.close();
  }
View Full Code Here

    // NOTE(review): fragment — slowIndexDir/slowTaxoDir, indexDir/taxoDir,
    // sleepMillis and numThreads are set up before this view.
    // Throttle both directories so concurrent reads interleave, to exercise
    // thread-safety of the shared readers and the facet cache.
    slowIndexDir.setSleepMillis(sleepMillis);
    slowTaxoDir.setSleepMillis(sleepMillis);

    // Open the slow readers
    IndexReader slowIndexReader = IndexReader.open(indexDir);
    TaxonomyReader slowTaxoReader = new LuceneTaxonomyReader(taxoDir);

    // Class to perform search and return results as threads
    class Multi extends Thread {
      private List<FacetResult> results;
      private FacetIndexingParams iParams;
      private IndexReader indexReader;
      private TaxonomyReader taxoReader;

      public Multi(IndexReader indexReader, TaxonomyReader taxoReader,
                    FacetIndexingParams iParams) {
        this.indexReader = indexReader;
        this.taxoReader = taxoReader;
        this.iParams = iParams;
      }

      // Wraps the search results collected by run() for the caller.
      public ExampleResult getResults() {
        ExampleResult exampleRes = new ExampleResult();
        exampleRes.setFacetResults(results);
        return exampleRes;
      }

      @Override
      public void run() {
        try {
          results = MultiCLSearcher.searchWithFacets(indexReader, taxoReader, iParams);
        } catch (Exception e) {
          // Propagate as unchecked so the thread dies loudly; join() below
          // will then observe missing results.
          throw new RuntimeException(e);
        }
      }
    }

    // Instantiate threads, but do not start them
    Multi[] multis = new Multi[numThreads];
    for (int i = 0; i < numThreads - 1; i++) {
      multis[i] = new Multi(slowIndexReader, slowTaxoReader, MultiCLIndexer.MULTI_IPARAMS);
    }
    // The last thread uses ONLY the DefaultFacetIndexingParams so that
    // it references a different TFC cache. This will still result
    // in valid results, but will only search one of the category lists
    // instead of all of them.
    multis[numThreads - 1] = new Multi(slowIndexReader, slowTaxoReader, new DefaultFacetIndexingParams());

    // Gentlemen, start your engines
    for (Multi m : multis) {
      m.start();
    }

    // Wait for threads and get results
    ExampleResult[] multiResults = new ExampleResult[numThreads];
    for (int i = 0; i < numThreads; i++) {
      multis[i].join();
      multiResults[i] = multis[i].getResults();
    }

    // Each of the (numThreads-1) should have the same predictable
    // results, which we test for here.
    for (int i = 0; i < numThreads - 1; i++) {
      ExampleResult eResults = multiResults[i];
      TestMultiCLExample.assertCorrectMultiResults(eResults);
    }

    // The last thread, which only searched over the
    // DefaultFacetIndexingParams,
    // has its own results
    ExampleResult eResults = multiResults[numThreads - 1];
    List<FacetResult> results = eResults.getFacetResults();
    assertEquals(3, results.size());
    // Expected root label and value per result; all roots should be leaves
    // (no sub-results) under the default params.
    String[] expLabels = new String[] { "5", "5/5", "6/2" };
    double[] expValues = new double[] { 0.0, 0.0, 1.0 };
    for (int i = 0; i < 3; i++) {
      FacetResult result = results.get(i);
      assertNotNull("Result should not be null", result);
      FacetResultNode resNode = result.getFacetResultNode();
      assertEquals("Invalid label", expLabels[i], resNode.getLabel().toString());
      assertEquals("Invalid value", expValues[i], resNode.getValue(), 0.0);
      assertEquals("Invalid number of subresults", 0, resNode.getNumSubResults());
    }
    // we're done, close the index reader and the taxonomy.
    slowIndexReader.close();
    slowTaxoReader.close();
    indexDir.close();
    taxoDir.close();
  }
View Full Code Here

   * @throws Exception on error (no detailed exception handling here for sample simplicity)
   * @return facet results
   */
  public static List<FacetResult> searchWithFacets (Directory indexDir, Directory taxoDir) throws Exception {
    // prepare index reader and taxonomy.
    TaxonomyReader taxo = new LuceneTaxonomyReader(taxoDir);
    IndexReader indexReader = IndexReader.open(indexDir);
   
    // prepare searcher to search against
    IndexSearcher searcher = new IndexSearcher(indexReader);
   
    // faceted search is working in 2 steps:
    // 1. collect matching documents
    // 2. aggregate facets for collected documents and
    //    generate the requested faceted results from the aggregated facets
   
    // step 1: collect matching documents into a collector
    Query q = new TermQuery(new Term(SimpleUtils.TEXT,"white"));
    ExampleUtils.log("Query: "+q);
   
    // regular collector for scoring matched documents
    TopScoreDocCollector topDocsCollector = TopScoreDocCollector.create(10, true);
   
    // docids collector for guiding facets accumulation (scoring disabled)
    ScoredDocIdCollector docIdsCollecor = ScoredDocIdCollector.create(indexReader.maxDoc(), false);
   
    // Faceted search parameters indicate which facets are we interested in
    FacetSearchParams facetSearchParams = new FacetSearchParams();
    facetSearchParams.addFacetRequest(new CountFacetRequest(new CategoryPath("root","a"), 10));
   
    // search, into both collectors. note: in case only facets accumulation
    // is required, the topDocCollector part can be totally discarded
    searcher.search(q, MultiCollector.wrap(topDocsCollector, docIdsCollecor));
       
    // Obtain facets results and print them
    AdaptiveFacetsAccumulator accumulator = new AdaptiveFacetsAccumulator(facetSearchParams, indexReader, taxo);
    List<FacetResult> res = accumulator.accumulate(docIdsCollecor.getScoredDocIDs());
   
    int i = 0;
    for (FacetResult facetResult : res) {
      ExampleUtils.log("Res "+(i++)+": "+facetResult);
    }
   
    // we're done, close the index reader and the taxonomy.
    indexReader.close();
    taxo.close();
   
    return res;
  }
View Full Code Here

TOP

Related Classes of org.apache.lucene.facet.taxonomy.TaxonomyReader$ChildrenIterator

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.