Package org.apache.lucene.search

Examples of org.apache.lucene.search.Weight
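
The excerpts on this page come from assorted projects and Lucene versions, but most target the 4.x API, in which Query.createWeight(IndexSearcher) builds a Weight, the searcher normalizes it, and the Weight then supplies one Scorer per index segment. Most excerpts are truncated mid-file, as in the original listing. As orientation, here is a minimal sketch of that lifecycle driven by hand (assuming Lucene 4.x; the class, field, and term names are illustrative, not taken from the excerpts):

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

public class WeightLifecycleSketch {

  // Drives a query through the Weight lifecycle by hand.
  static void searchManually(IndexSearcher searcher) throws IOException {
    Query query = new TermQuery(new Term("body", "lucene")); // illustrative field/term

    // 1. Create the (unnormalized) Weight.
    Weight weight = query.createWeight(searcher);

    // 2. Normalize: fold the query norm and boosts into the weight.
    float v = weight.getValueForNormalization();
    float norm = searcher.getSimilarity().queryNorm(v);
    weight.normalize(norm, 1.0f);

    // 3. Score: one Scorer per index segment (AtomicReaderContext).
    for (AtomicReaderContext context : searcher.getIndexReader().leaves()) {
      Scorer scorer = weight.scorer(context, true, false, context.reader().getLiveDocs());
      if (scorer == null) {
        continue; // no matching documents in this segment
      }
      int doc;
      while ((doc = scorer.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        System.out.println("doc=" + (context.docBase + doc) + " score=" + scorer.score());
      }
    }
  }
}

Each excerpt below implements, wraps, or drives some part of this pipeline.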


      this.setter = setter;
    }

    // Delegate Weight creation, but hand the freshly created Weight to the
    // callback before returning it, so the caller can inspect it.
    @Override
    public Weight createWeight(final IndexSearcher searcher) throws IOException {
      Weight w = originalQuery.createWeight(searcher);
      setter.set(w);
      return w;
    }
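
The fragment above begins mid-class: it is the tail of a wrapper Query that delegates createWeight but exposes the resulting Weight through a callback. Filled out under assumed names (WeightCapturingQuery and WeightSetter are illustrative, not from the source), the pattern looks like this:

import java.io.IOException;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;

// Hypothetical reconstruction of the wrapper pattern above.
public class WeightCapturingQuery extends Query {

  interface WeightSetter {
    void set(Weight weight);
  }

  private final Query originalQuery;
  private final WeightSetter setter;

  public WeightCapturingQuery(Query originalQuery, WeightSetter setter) {
    this.originalQuery = originalQuery;
    this.setter = setter;
  }

  @Override
  public Weight createWeight(final IndexSearcher searcher) throws IOException {
    Weight w = originalQuery.createWeight(searcher);
    setter.set(w); // expose the Weight to the caller before the searcher uses it
    return w;
  }

  @Override
  public String toString(String field) {
    return originalQuery.toString(field);
  }
}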


    return result;
  }

  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    final Weight originalWeight = originalQuery.createWeight(searcher);
    return new Weight() {

      private TermsEnum segmentTermsEnum;

      @Override
      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
        // Build a throwaway scorer for this segment and advance it solely to
        // produce an explanation for the requested doc.
        SVInnerScorer scorer = (SVInnerScorer) scorer(context, false, false, context.reader().getLiveDocs());
        if (scorer != null) {
          if (scorer.advanceForExplainOnly(doc) == doc) {
            return scorer.explain();
          }
        }
        return new ComplexExplanation(false, 0.0f, "Not a match");
      }

      @Override
      public Query getQuery() {
        return TermsIncludingScoreQuery.this;
      }

      @Override
      public float getValueForNormalization() throws IOException {
        return originalWeight.getValueForNormalization() * TermsIncludingScoreQuery.this.getBoost() * TermsIncludingScoreQuery.this.getBoost();
      }

      @Override
      public void normalize(float norm, float topLevelBoost) {
        originalWeight.normalize(norm, topLevelBoost * TermsIncludingScoreQuery.this.getBoost());
      }

      @Override
      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
        Terms terms = context.reader().terms(field);
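
The getValueForNormalization and normalize overrides in TermsIncludingScoreQuery's Weight above follow the Lucene 4.x contract: the query's boost enters squared into the value for normalization, and is pushed down to the wrapped weight through topLevelBoost, so it is applied exactly once in the final scores. A small numeric check (values illustrative; DefaultSimilarity defines queryNorm as 1/sqrt of the value):

float b = 2.0f;                 // this query's boost (illustrative)
float v = 9.0f;                 // wrapped weight's raw value (illustrative)
float valueForNorm = v * b * b; // 36.0f, as returned by getValueForNormalization()
float queryNorm = (float) (1.0 / Math.sqrt(valueForNorm)); // ~0.1667f
// normalize(queryNorm, 1.0f) above then calls
// originalWeight.normalize(queryNorm, 1.0f * b), re-applying the boost once.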

    }
  }
 
  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    final Weight baseWeight = baseQuery.createWeight(searcher);

    return new Weight() {
      @Override
      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
        return baseWeight.explain(context, doc);
      }

      @Override
      public Query getQuery() {
        return baseQuery;
      }

      @Override
      public float getValueForNormalization() throws IOException {
        return baseWeight.getValueForNormalization();
      }

      @Override
      public void normalize(float norm, float topLevelBoost) {
        baseWeight.normalize(norm, topLevelBoost);
      }

      @Override
      public boolean scoresDocsOutOfOrder() {
        // TODO: would be nice if AssertingIndexSearcher
        // confirmed this for us
        return false;
      }

      @Override
      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
                           boolean topScorer, Bits acceptDocs) throws IOException {

        DrillSidewaysScorer.DocsEnumsAndFreq[] dims = new DrillSidewaysScorer.DocsEnumsAndFreq[drillDownTerms.length];
        TermsEnum termsEnum = null;
        String lastField = null;
        int nullCount = 0;
        for(int dim=0;dim<dims.length;dim++) {
          dims[dim] = new DrillSidewaysScorer.DocsEnumsAndFreq();
          dims[dim].sidewaysCollector = drillSidewaysCollectors[dim];
          String field = drillDownTerms[dim][0].field();
          dims[dim].dim = drillDownTerms[dim][0].text();
          if (lastField == null || !lastField.equals(field)) {
            AtomicReader reader = context.reader();
            Terms terms = reader.terms(field);
            if (terms != null) {
              termsEnum = terms.iterator(null);
            } else {
              termsEnum = null;
            }
            lastField = field;
          }
          dims[dim].docsEnums = new DocsEnum[drillDownTerms[dim].length];
          if (termsEnum == null) {
            nullCount++;
            continue;
          }
          for(int i=0;i<drillDownTerms[dim].length;i++) {
            if (termsEnum.seekExact(drillDownTerms[dim][i].bytes(), false)) {
              DocsEnum docsEnum = termsEnum.docs(null, null, 0);
              if (docsEnum != null) {
                dims[dim].docsEnums[i] = docsEnum;
                dims[dim].maxCost = Math.max(dims[dim].maxCost, docsEnum.cost());
              }
            }
          }
        }

        if (nullCount > 1 || (nullCount == 1 && dims.length == 1)) {
          return null;
        }

        // Sort drill-downs by most restrictive first:
        Arrays.sort(dims);

        // TODO: it could be better if we take acceptDocs
        // into account instead of baseScorer?
        Scorer baseScorer = baseWeight.scorer(context, scoreDocsInOrder, false, acceptDocs);

        if (baseScorer == null) {
          return null;
        }
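
The two test excerpts that follow come from a much older Lucene (pre-3.0), where Weight.scorer(IndexReader) returned a single reader-wide Scorer that also handled skipping and explanations itself. Roughly, the old calls map onto the later API as sketched here (a sketch, not from the source):

// Pre-3.0 shape, as used in the tests below:
Scorer s = w.scorer(searcher.getIndexReader()); // one Scorer for the whole reader
s.skipTo(1);   // later renamed to s.advance(1)
s.doc();       // later renamed to s.docID()
s.explain(1);  // explanations later moved to Weight.explain(...)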

   * not a direct test of NearSpans, but a demonstration of how/when
   * this causes problems
   */
  public void testSpanNearScorerSkipTo1() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = q.createWeight(searcher);
    Scorer s = w.scorer(searcher.getIndexReader());
    assertEquals(true, s.skipTo(1));
    assertEquals(1, s.doc());
  }

   * not a direct test of NearSpans, but a demonstration of how/when
   * this causes problems
   */
  public void testSpanNearScorerExplain() throws Exception {
    SpanNearQuery q = makeQuery();
    Weight w = q.createWeight(searcher);
    Scorer s = w.scorer(searcher.getIndexReader());
    Explanation e = s.explain(1);
    assertTrue("Scorer explanation value for doc#1 isn't positive: "
               + e.toString(),
               0.0f < e.getValue());
  }

   * @throws IOException in case a search exception occurs
   */
  private void updateTopDocs(int n) throws IOException {
    int totalMaxDocs = searcher.getSearcher().maxDoc();
    final int maxDocs = Math.min( n, totalMaxDocs );
    final Weight weight = preparedQuery.weight( searcher.getSearcher() );

    final TopDocsCollector<?> topDocCollector;
    final TotalHitCountCollector hitCountCollector;
    Collector collector = null;
    if ( maxDocs != 0 ) {
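
The call preparedQuery.weight( searcher.getSearcher() ) above uses the old Query.weight(Searcher) convenience, which created the Weight and normalized it in one step; later Lucene versions replace it with IndexSearcher.createNormalizedWeight. A one-line sketch of the 4.x equivalent (assuming a plain IndexSearcher named is):

Weight weight = is.createNormalizedWeight(preparedQuery);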

    // Apply the buffered deletes-by-query: for each query, delete every
    // matching doc whose id falls below that query's recorded limit.
    iter = deletesFlushed.queries.entrySet().iterator();
    while (iter.hasNext()) {
      Entry entry = (Entry) iter.next();
      Query query = (Query) entry.getKey();
      int limit = ((Integer) entry.getValue()).intValue();
      Weight weight = query.weight(searcher);
      Scorer scorer = weight.scorer(reader);
      while (scorer.next()) {
        final int docID = scorer.doc();
        if (docIDStart + docID >= limit)
          break;
        reader.deleteDocument(docID);

    }
  }
 
  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    final Weight baseWeight = baseQuery.createWeight(searcher);

    return new Weight() {
      @Override
      public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
        return baseWeight.explain(context, doc);
      }

      @Override
      public Query getQuery() {
        return baseQuery;
      }

      @Override
      public float getValueForNormalization() throws IOException {
        return baseWeight.getValueForNormalization();
      }

      @Override
      public void normalize(float norm, float topLevelBoost) {
        baseWeight.normalize(norm, topLevelBoost);
      }

      @Override
      public boolean scoresDocsOutOfOrder() {
        // TODO: would be nice if AssertingIndexSearcher
        // confirmed this for us
        return false;
      }

      @Override
      public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
                           boolean topScorer, Bits acceptDocs) throws IOException {

        DrillSidewaysScorer.DocsEnumsAndFreq[] dims = new DrillSidewaysScorer.DocsEnumsAndFreq[drillDownTerms.length];
        TermsEnum termsEnum = null;
        String lastField = null;
        int nullCount = 0;
        for(int dim=0;dim<dims.length;dim++) {
          dims[dim] = new DrillSidewaysScorer.DocsEnumsAndFreq();
          dims[dim].sidewaysCollector = drillSidewaysCollectors[dim];
          String field = drillDownTerms[dim][0].field();
          dims[dim].dim = drillDownTerms[dim][0].text();
          if (lastField == null || !lastField.equals(field)) {
            AtomicReader reader = context.reader();
            Terms terms = reader.terms(field);
            if (terms != null) {
              termsEnum = terms.iterator(null);
            } else {
              termsEnum = null; // otherwise the previous field's enum would be reused
            }
            lastField = field;
          }
          if (termsEnum == null) {
            nullCount++;
            continue;
          }
          dims[dim].docsEnums = new DocsEnum[drillDownTerms[dim].length];
          for(int i=0;i<drillDownTerms[dim].length;i++) {
            if (termsEnum.seekExact(drillDownTerms[dim][i].bytes(), false)) {
              dims[dim].freq = Math.max(dims[dim].freq, termsEnum.docFreq());
              dims[dim].docsEnums[i] = termsEnum.docs(null, null);
            }
          }
        }

        if (nullCount > 1) {
          return null;
        }

        // Sort drill-downs by most restrictive first:
        Arrays.sort(dims);

        // TODO: it could be better if we take acceptDocs
        // into account instead of baseScorer?
        Scorer baseScorer = baseWeight.scorer(context, scoreDocsInOrder, false, acceptDocs);

        if (baseScorer == null) {
          return null;
        }

          try {
            List<Query> filters = rb.getFilters();
            if (filters != null) {
              final ArrayList<DocIdSet> docsets = new ArrayList<DocIdSet>(filters.size());
              for (Query filter : filters) {
                Weight weight = filter.createWeight(rb.req.getSearcher());
                final Scorer scorer = weight.scorer(reader, false, true);
                docsets.add(new DocIdSet() {
                  @Override
                  public DocIdSetIterator iterator() throws IOException {
                    return scorer;
                  }
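
Handing the Scorer back directly as the DocIdSet's iterator works because Scorer extends DocIdSetIterator, so a filter's matches can be streamed as plain doc ids. A sketch of consuming the result (assuming the docsets list above has been fully built):

// Each entry streams the matching doc ids of one filter. Note that a
// Scorer cannot be rewound, so each of these DocIdSets is single-use.
for (DocIdSet set : docsets) {
  DocIdSetIterator it = set.iterator();
  int doc;
  while ((doc = it.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    // handle doc: it matched the corresponding filter
  }
}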
