Package: org.apache.lucene.search

Examples of org.apache.lucene.search.Weight


    // Top-N hit collector; concrete type chosen below depending on whether a sort was given.
    final TopDocsCollector<?> topCollector;
    // Separate, wider-typed reference: TimeLimitingCollector is not a TopDocsCollector,
    // so a time-limited wrapper cannot be stored in topCollector.
    Collector maybeTimeLimitingCollector;
    //copied from IndexSearcher#search
    //we only accept indexSearcher atm which means we can copy its Collector creation strategy
    // Never ask for more hits than documents exist in the index.
    final int maxDocs = Math.min( n, searcher.getSearcher().maxDoc() );
    final Weight weight = preparedQuery.weight( searcher.getSearcher() );
    if ( sort == null ) {
      // No sort requested: rank by relevance score.
      // Second arg is the docsScoredInOrder flag, hence the negation of scoresDocsOutOfOrder().
      topCollector = TopScoreDocCollector.create(maxDocs, !weight.scoresDocsOutOfOrder());
    }
    else {
      // Sort requested: collect by sort fields; fillFields=true asks the collector to
      // populate the sort-field values on the returned FieldDoc instances.
      boolean fillFields = true;
      topCollector = TopFieldCollector.create(
          sort,
          maxDocs,
              fillFields,
          searcher.isFieldSortDoTrackScores(),
          searcher.isFieldSortDoMaxScore(),
          !weight.scoresDocsOutOfOrder()
      );
    }
    // Default to the plain collector; may be wrapped with a time limit below.
    maybeTimeLimitingCollector = topCollector;
    boolean timeoutAt0 = false;
    if ( timeoutManager.getType() == TimeoutManager.Type.LIMIT ) {
View Full Code Here


  public Query clone() {
    return new SuperQuery((Query) _query.clone(), scoreType, primeDocTerm, _rewritten);
  }

  public Weight createWeight(IndexSearcher searcher) throws IOException {
    Weight weight = _query.createWeight(searcher);
    return new SuperWeight(weight, _query.toString(), this, scoreType, primeDocTerm);
  }
View Full Code Here

    this.query = query;
  }

  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    Weight weight = query.createWeight(searcher);
    return new SlowWeight(this, weight);
  }
View Full Code Here

    return new FacetQuery(_query.rewrite(reader), facets, counts, true);
  }

  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    Weight weight = _query.createWeight(searcher);
    return new FacetWeight(weight, getWeights(searcher), counts);
  }
View Full Code Here

    // Walk the buffered delete-by-query entries; each maps a Query to the docID
    // ceiling up to which the delete applies (snippet is truncated before the
    // actual deletion — presumably matching docs below the limit are deleted).
    iter = deletesFlushed.queries.entrySet().iterator();
    while(iter.hasNext()) {
      Entry entry = (Entry) iter.next();
      Query query = (Query) entry.getKey();
      int limit = ((Integer) entry.getValue()).intValue();
      Weight weight = query.weight(searcher);
      Scorer scorer = weight.scorer(reader, true, false);
      if (scorer != null) {
        while(true)  {
          int doc = scorer.nextDoc();
          // long cast avoids int overflow when adding the segment's doc base
          // (doc may be NO_MORE_DOCS = Integer.MAX_VALUE).
          if (((long) docIDStart) + doc >= limit)
            break;
View Full Code Here

   * not a direct test of NearSpans, but a demonstration of how/when
   * this causes problems
   */
  public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = searcher.createNormalizedWeight(q);
    Scorer s = w.scorer(searcher.getIndexReader(), true, false);
    assertEquals(1, s.advance(1));
  }
View Full Code Here

    }
  }
 
  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    // Weight for the base (non-drill-down) query; the anonymous Weight below
    // mostly delegates to it and adds drill-sideways scoring on top.
    final Weight baseWeight = baseQuery.createWeight(searcher);

    return new Weight() {
      @Override
      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
        // Explanations come straight from the base query's weight.
        return baseWeight.explain(context, doc);
      }

      @Override
      public Query getQuery() {
        return baseQuery;
      }

      @Override
      public float getValueForNormalization() throws IOException {
        return baseWeight.getValueForNormalization();
      }

      @Override
      public void normalize(float norm, float topLevelBoost) {
        baseWeight.normalize(norm, topLevelBoost);
      }

      @Override
      public boolean scoresDocsOutOfOrder() {
        // TODO: would be nice if AssertingIndexSearcher
        // confirmed this for us
        return false;
      }

      @Override
      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
                           boolean topScorer, Bits acceptDocs) throws IOException {

        // One DocsEnumsAndFreq per drill-down dimension; filled with the
        // postings of every term in that dimension for this segment.
        DrillSidewaysScorer.DocsEnumsAndFreq[] dims = new DrillSidewaysScorer.DocsEnumsAndFreq[drillDownTerms.length];
        TermsEnum termsEnum = null;
        String lastField = null;
        // Number of dimensions with no terms in this segment.
        int nullCount = 0;
        for(int dim=0;dim<dims.length;dim++) {
          dims[dim] = new DrillSidewaysScorer.DocsEnumsAndFreq();
          dims[dim].sidewaysCollector = drillSidewaysCollectors[dim];
          String field = drillDownTerms[dim][0].field();
          dims[dim].dim = drillDownTerms[dim][0].text();
          // Reuse the TermsEnum while consecutive dimensions share a field.
          if (lastField == null || !lastField.equals(field)) {
            AtomicReader reader = context.reader();
            Terms terms = reader.terms(field);
            if (terms != null) {
              termsEnum = terms.iterator(null);
            } else {
              termsEnum = null;
            }
            lastField = field;
          }
          dims[dim].docsEnums = new DocsEnum[drillDownTerms[dim].length];
          if (termsEnum == null) {
            // Field absent in this segment: dimension contributes no postings.
            nullCount++;
            continue;
          }
          for(int i=0;i<drillDownTerms[dim].length;i++) {
            if (termsEnum.seekExact(drillDownTerms[dim][i].bytes())) {
              DocsEnum docsEnum = termsEnum.docs(null, null, 0);
              if (docsEnum != null) {
                dims[dim].docsEnums[i] = docsEnum;
                // Track the most expensive postings list for this dimension.
                dims[dim].maxCost = Math.max(dims[dim].maxCost, docsEnum.cost());
              }
            }
          }
        }

        // More than one missing dimension (or the only dimension missing)
        // means no document can match this segment.
        if (nullCount > 1 || (nullCount == 1 && dims.length == 1)) {
          return null;
        }

        // Sort drill-downs by most restrictive first:
        Arrays.sort(dims);

        // TODO: it could be better if we take acceptDocs
        // into account instead of baseScorer?
        // Base scorer is never requested as a top scorer here (false).
        Scorer baseScorer = baseWeight.scorer(context, scoreDocsInOrder, false, acceptDocs);

        if (baseScorer == null) {
          return null;
        }
View Full Code Here

    // Iterate the flushed delete-by-query map: key is the Query, value is the
    // docID ceiling the delete applies to (the deletion itself is truncated
    // from this snippet).
    iter = deletesFlushed.queries.entrySet().iterator();
    while(iter.hasNext()) {
      Entry entry = (Entry) iter.next();
      Query query = (Query) entry.getKey();
      int limit = ((Integer) entry.getValue()).intValue();
      Weight weight = query.weight(searcher);
      Scorer scorer = weight.scorer(reader, true, false);
      if (scorer != null) {
        while(true)  {
          int doc = scorer.nextDoc();
          // long cast prevents int overflow when doc is NO_MORE_DOCS
          // (Integer.MAX_VALUE) and docIDStart is added.
          if (((long) docIDStart) + doc >= limit)
            break;
View Full Code Here

      this.setter = setter;
    }

    @Override
    public Weight createWeight(final IndexSearcher searcher) throws IOException {
      Weight w = originalQuery.createWeight(searcher);
      setter.set(w);
      return w;
    }
View Full Code Here

    return result;
  }

  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    // Weight of the query this TermsIncludingScoreQuery wraps; normalization
    // is delegated to it while scoring is handled by the anonymous Weight.
    final Weight originalWeight = originalQuery.createWeight(searcher);
    return new Weight() {

      // Per-segment terms enumerator, reused across calls on the same segment.
      private TermsEnum segmentTermsEnum;

      @Override
      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
        // Re-create a scorer restricted to live docs and advance it to the
        // target doc purely for explanation purposes.
        SVInnerScorer scorer = (SVInnerScorer) scorer(context, false, false, context.reader().getLiveDocs());
        if (scorer != null) {
          if (scorer.advanceForExplainOnly(doc) == doc) {
            return scorer.explain();
          }
        }
        return new ComplexExplanation(false, 0.0f, "Not a match");
      }

      @Override
      public Query getQuery() {
        return TermsIncludingScoreQuery.this;
      }

      @Override
      public float getValueForNormalization() throws IOException {
        // Boost is applied squared, matching the standard Weight contract
        // where the normalization value includes boost^2.
        return originalWeight.getValueForNormalization() * TermsIncludingScoreQuery.this.getBoost() * TermsIncludingScoreQuery.this.getBoost();
      }

      @Override
      public void normalize(float norm, float topLevelBoost) {
        // Fold this query's boost into the top-level boost before delegating.
        originalWeight.normalize(norm, topLevelBoost * TermsIncludingScoreQuery.this.getBoost());
      }

      @Override
      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
        Terms terms = context.reader().terms(field);
View Full Code Here

TOP

Related Classes of org.apache.lucene.search.Weight

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware@gmail.com.