Package: org.apache.lucene.search

Examples of org.apache.lucene.search.HitCollector


            }
        }

        private void collectContextHits() throws IOException {
            if (!contextHitsCalculated) {
                contextScorer.score(new HitCollector() {
                    public void collect(int doc, float score) {
                        contextHits.set(doc);
                    }
                }); // find all
                contextHitsCalculated = true;
View Full Code Here


        try {
            IndexSearcher searcher = new IndexSearcher(reader);
            try {
                Query q = new TermQuery(new Term(
                        FieldNames.WEAK_REFS, id.toString()));
                searcher.search(q, new HitCollector() {
                    public void collect(int doc, float score) {
                        docs.add(doc);
                    }
                });
            } finally {
View Full Code Here

 
  // First pass: search with the index's default similarity and record each
  // document's score into scores[], indexed by Lucene doc id.
  // default similarity should put docs with shorter length first
  searcher = new IndexSearcher(store);
  searcher.search
      (new TermQuery(new Term("field", "word")),
       new HitCollector() {
     public final void collect(int doc, float score) {
         scores[doc] = score;
     }
       });
  searcher.close();

  // Verify scores are non-increasing by doc id. NOTE(review): this assumes
  // docs were indexed in order of increasing field length -- indexing code is
  // outside this view, confirm against the setup method.
  lastScore = Float.MAX_VALUE;
  for (int i = 0; i < NUM_DOCS; i++) {
      String msg = "i=" + i + ", "+scores[i]+" <= "+lastScore;
      assertTrue(msg, scores[i] <= lastScore);
      //System.out.println(msg);
      lastScore = scores[i];
  }

  // override the norms to be inverted
  // DefaultSimilarity's lengthNorm normally shrinks as numTokens grows;
  // returning numTokens directly inverts the ranking preference.
  Similarity s = new DefaultSimilarity() {
    public float lengthNorm(String fieldName, int numTokens) {
        return (float)numTokens;
    }
      };
  // Rewrite the stored norms for "field" using the inverted similarity.
  LengthNormModifier lnm = new LengthNormModifier(store, s);
  lnm.reSetNorms("field");

  // Second pass: same query, re-opened searcher picks up the rewritten norms.
  // new norm (with default similarity) should put longer docs first
  searcher = new IndexSearcher(store);
  searcher.search
      (new TermQuery(new Term("field", "word")),
       new HitCollector() {
     public final void collect(int doc, float score) {
         scores[doc] = score;
     }
       });
  searcher.close();
View Full Code Here

    IndexSearcher searcher = new IndexSearcher(store);
    // One score slot per document, filled by the collector below.
    final float[] scores = new float[NUM_DOCS];
    float lastScore = 0.0f;

    // default similarity should put docs with shorter length first
    searcher.search(new TermQuery(new Term("field", "word")), new HitCollector() {
      public final void collect(int doc, float score) {
        scores[doc] = score;
      }
    });
    searcher.close();

    // Verify scores are non-increasing by doc id. NOTE(review): assumes doc id
    // order corresponds to increasing field length -- confirm against setup.
    lastScore = Float.MAX_VALUE;
    for (int i = 0; i < NUM_DOCS; i++) {
      String msg = "i=" + i + ", " + scores[i] + " <= " + lastScore;
      assertTrue(msg, scores[i] <= lastScore);
      //System.out.println(msg);
      lastScore = scores[i];
    }

    // Rewrite the stored norms for "field" with similarity s (declared outside
    // this view; presumably an inverted lengthNorm -- TODO confirm).
    FieldNormModifier fnm = new FieldNormModifier(store, s);
    fnm.reSetNorms("field");
   
    // new norm (with default similarity) should put longer docs first
    searcher = new IndexSearcher(store);
    searcher.search(new TermQuery(new Term("field", "word"))new HitCollector() {
      public final void collect(int doc, float score) {
        scores[doc] = score;
      }
    });
    searcher.close();
View Full Code Here

    IndexSearcher searcher = new IndexSearcher(store);
    // One score slot per document, filled by the collector below.
    final float[] scores = new float[NUM_DOCS];
    // NOTE(review): lastScore is not read within this view; presumably used in
    // assertions past the snippet boundary -- confirm in the full test.
    float lastScore = 0.0f;

    // default similarity should return the same score for all documents for this query
    searcher.search(new TermQuery(new Term("untokfield", "20061212")), new HitCollector() {
      public final void collect(int doc, float score) {
        scores[doc] = score;
      }
    });
    searcher.close();
View Full Code Here

 
  // First pass: search with the index's default similarity and record each
  // document's score into scores[], indexed by Lucene doc id.
  // default similarity should put docs with shorter length first
  searcher = new IndexSearcher(store);
  searcher.search
      (new TermQuery(new Term("field", "word")),
       new HitCollector() {
     public final void collect(int doc, float score) {
         scores[doc] = score;
     }
       });
  searcher.close();

  // Verify scores are non-increasing by doc id. NOTE(review): this assumes
  // docs were indexed in order of increasing field length -- indexing code is
  // outside this view, confirm against the setup method.
  lastScore = Float.MAX_VALUE;
  for (int i = 0; i < NUM_DOCS; i++) {
      String msg = "i=" + i + ", "+scores[i]+" <= "+lastScore;
      assertTrue(msg, scores[i] <= lastScore);
      //System.out.println(msg);
      lastScore = scores[i];
  }

  // override the norms to be inverted
  // DefaultSimilarity's lengthNorm normally shrinks as numTokens grows;
  // returning numTokens directly inverts the ranking preference.
  Similarity s = new DefaultSimilarity() {
    public float lengthNorm(String fieldName, int numTokens) {
        return (float)numTokens;
    }
      };
  // Rewrite the stored norms for "field" using the inverted similarity.
  LengthNormModifier lnm = new LengthNormModifier(store, s);
  lnm.reSetNorms("field");

  // Second pass: same query, re-opened searcher picks up the rewritten norms.
  // new norm (with default similarity) should put longer docs first
  searcher = new IndexSearcher(store);
  searcher.search
      (new TermQuery(new Term("field", "word")),
       new HitCollector() {
     public final void collect(int doc, float score) {
         scores[doc] = score;
     }
       });
  searcher.close();
View Full Code Here

         if (hits == null)
         {
            hits = new BitSet(reader.maxDoc());

            final IOException[] ex = new IOException[1];
            contextScorer.score(new HitCollector()
            {
               public void collect(int doc, float score)
               {
                  try
                  {
View Full Code Here

      private void calculateChildren() throws IOException
      {
         if (uuids == null)
         {
            uuids = new ArrayList();
            contextScorer.score(new HitCollector()
            {
               public void collect(int doc, float score)
               {
                  hits.set(doc);
               }
            });

            // collect nameTest hits
            final BitSet nameTestHits = new BitSet();
            if (nameTestScorer != null)
            {
               nameTestScorer.score(new HitCollector()
               {
                  public void collect(int doc, float score)
                  {
                     nameTestHits.set(doc);
                  }
View Full Code Here

            if (hits == null) {
               
                // collect all context nodes
                // NOTE(review): uuids is not populated within this view;
                // presumably filled from contextHits past the snippet
                // boundary -- confirm in the full method.
                List uuids = new ArrayList();
                final Hits contextHits = new AdaptingHits();
                // Mark every doc matched by the context query in contextHits.
                contextScorer.score(new HitCollector() {
                    public void collect(int doc, float score) {
                        contextHits.set(doc);
                    }
                });
View Full Code Here

        private void calculateParent() throws IOException {
            if (hits == null) {
                hits = new BitSet(reader.maxDoc());

                final IOException[] ex = new IOException[1];
                contextScorer.score(new HitCollector() {
                    public void collect(int doc, float score) {
                        try {
                            doc = hResolver.getParent(doc);
                            if (doc != -1) {
                                hits.set(doc);
View Full Code Here

TOP

Related Classes of org.apache.lucene.search.HitCollector

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware#gmail.com.