Package org.apache.lucene.search

Examples of org.apache.lucene.search.TermQuery


    IndexSearcher searcher = new IndexSearcher(store, true);
    final float[] scores = new float[NUM_DOCS];
    float lastScore = 0.0f;
   
    // default similarity should put docs with shorter length first
    searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
     
      @Override
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
      }
      @Override
      public void setNextReader(IndexReader reader, int docBase) {
        this.docBase = docBase;
      }
      @Override
      public void setScorer(Scorer scorer) throws IOException {
        this.scorer = scorer;
      }
      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true;
      }
    });
    searcher.close();
   
    lastScore = Float.MAX_VALUE;
    for (int i = 0; i < NUM_DOCS; i++) {
      String msg = "i=" + i + ", " + scores[i] + " <= " + lastScore;
      assertTrue(msg, scores[i] <= lastScore);
      //System.out.println(msg);
      lastScore = scores[i];
    }

    // s is a Similarity whose lengthNorm is overridden to return
    // numTokens, inverting the default ordering (defined earlier in this test)
    FieldNormModifier fnm = new FieldNormModifier(store, s);
    fnm.reSetNorms("field");
   
    // new norm (with default similarity) should put longer docs first
    searcher = new IndexSearcher(store, true);
    searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
      @Override
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
        // ... (excerpt truncated)
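The anonymous Collector above is the standard 2.9-era pattern for capturing per-document scores. Lucene drives the search one segment at a time: docBase is the current segment's offset into the global doc-id space, so doc + docBase converts the segment-relative id that collect() receives into a global one. The same pattern as a self-contained named class (a sketch; the class and field names are illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.Scorer;

    // Records each matching document's score, indexed by global doc id.
    class ScoreArrayCollector extends Collector {
      private final float[] scores;
      private int docBase;
      private Scorer scorer;

      ScoreArrayCollector(float[] scores) {
        this.scores = scores;
      }
      @Override
      public void collect(int doc) throws IOException {
        // doc is segment-relative; docBase shifts it to the global id
        scores[doc + docBase] = scorer.score();
      }
      @Override
      public void setNextReader(IndexReader reader, int docBase) {
        this.docBase = docBase;
      }
      @Override
      public void setScorer(Scorer scorer) {
        this.scorer = scorer;
      }
      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true; // scores land by doc id, so visit order is irrelevant
      }
    }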


    IndexSearcher searcher = new IndexSearcher(store, true);
    final float[] scores = new float[NUM_DOCS];
    float lastScore = 0.0f;
   
    // default similarity should return the same score for all documents for this query
    searcher.search(new TermQuery(new Term("untokfield", "20061212")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
      @Override
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
        // ... (excerpt truncated)
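The comment above holds because "untokfield" is indexed untokenized: every document contributes exactly one token, so each one receives the same length norm and hence the same score for this single-term query. For reference, a sketch of the norm DefaultSimilarity computes in this era (assuming the stock 1/sqrt formula; with numTokens == 1 it is 1.0f for every document):

    // Shorter fields get larger norms; an untokenized field always has
    // numTokens == 1, so the norm is identical across documents.
    public float lengthNorm(String fieldName, int numTokens) {
      return (float) (1.0 / Math.sqrt(numTokens));
    }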

    searcher = new IndexSearcher(directory, true);

    // query for everything to make life easier
    BooleanQuery bq = new BooleanQuery();
    bq.add(new TermQuery(new Term("owner", "bob")), BooleanClause.Occur.SHOULD);
    bq.add(new TermQuery(new Term("owner", "sue")), BooleanClause.Occur.SHOULD);
    query = bq;

    // date filter matches everything too
    //Date pastTheEnd = parseDate("2099 Jan 1");
    // dateFilter = DateFilter.Before("date", pastTheEnd);
    // just treat dates as strings and select the whole range for now...
    dateFilter = new TermRangeFilter("date", "", "ZZZZ", true, true);

    bobFilter = new QueryWrapperFilter(
        new TermQuery(new Term("owner", "bob")));
    sueFilter = new QueryWrapperFilter(
        new TermQuery(new Term("owner", "sue")));
  }
    // ... (excerpt truncated)
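Once the setup completes, any of these filters can be passed alongside the query; a brief usage sketch (variable names and hit counts are illustrative):

    // Same catch-all query, restricted to bob's documents. The filter
    // only narrows the candidate set; it does not affect scoring.
    TopDocs bobOnly = searcher.search(query, bobFilter, 10);

    // The wide-open date range matches everything, so this returns
    // the same hits as the unfiltered query.
    TopDocs allDates = searcher.search(query, dateFilter, 10);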

    IndexWriter writer = new IndexWriter(dir, analyzer, true, MaxFieldLength.LIMITED);
    writer.close();
 
    Searcher searcher = new IndexSearcher(dir, true);
 
    Query query = new TermQuery(new Term("none", "none"));
 
    QueryWrapperFilter queryFilter = new QueryWrapperFilter(query);
    CachingWrapperFilter cachingFilter = new CachingWrapperFilter(queryFilter);
 
    searcher.search(query, cachingFilter, 1);
    // ... (excerpt truncated)
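CachingWrapperFilter caches the wrapped filter's DocIdSet keyed by the IndexReader, so the underlying query is evaluated once per reader rather than once per search; a sketch of the effect:

    // First search evaluates queryFilter and caches its DocIdSet for
    // this reader; the repeat search is answered from the cache.
    searcher.search(query, cachingFilter, 1);
    searcher.search(query, cachingFilter, 1); // cache hit, filter not re-run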

  final float[] scores = new float[NUM_DOCS];
  float lastScore = 0.0f;
 
  // default similarity should put docs with shorter length first
  searcher = new IndexSearcher(store, false);
  searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
    private int docBase = 0;
    private Scorer scorer;
    @Override
    public final void collect(int doc) throws IOException {
      scores[doc + docBase] = scorer.score();
    }
    @Override
    public void setNextReader(IndexReader reader, int docBase) {
      this.docBase = docBase;
    }
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
    }
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  });
  searcher.close();
 
  lastScore = Float.MAX_VALUE;
  for (int i = 0; i < NUM_DOCS; i++) {
    String msg = "i=" + i + ", " + scores[i] + " <= " + lastScore;
    assertTrue(msg, scores[i] <= lastScore);
    //System.out.println(msg);
    lastScore = scores[i];
  }

  // override the norms to be inverted
  Similarity s = new DefaultSimilarity() {
    @Override
    public float lengthNorm(String fieldName, int numTokens) {
      return numTokens;
    }
  };
  FieldNormModifier fnm = new FieldNormModifier(store, s);
  fnm.reSetNorms("field");

  // new norm (with default similarity) should put longer docs first
  searcher = new IndexSearcher(store, false);
  searcher.search(new TermQuery(new Term("field", "word")), new Collector() {
      private int docBase = 0;
      private Scorer scorer;
      @Override
      public final void collect(int doc) throws IOException {
        scores[doc + docBase] = scorer.score();
        // ... (excerpt truncated)
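The excerpt cuts off before its final assertions, but after reSetNorms the stored norms have been rewritten with the inverted Similarity, so longer documents now score higher. Presumably the test finishes by mirroring the earlier loop with the comparison flipped; a sketch (the exact assertion is an assumption, not shown in the excerpt):

    lastScore = 0.0f;
    for (int i = 0; i < NUM_DOCS; i++) {
      // with inverted norms the scores should be non-decreasing
      assertTrue("i=" + i + ", " + scores[i] + " >= " + lastScore,
          scores[i] >= lastScore);
      lastScore = scores[i];
    }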

    QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer()) {
      @Override
      protected Query getWildcardQuery(String field, String termStr) throws ParseException {
        // override error checking of superclass
        type[0] = 1;
        return new TermQuery(new Term(field, termStr));
      }
      @Override
      protected Query getPrefixQuery(String field, String termStr) throws ParseException {
        // override error checking of superclass
        type[0] = 2;
        return new TermQuery(new Term(field, termStr));
      }

      @Override
      protected Query getFieldQuery(String field, String queryText) throws ParseException {
        type[0] = 3;
        return super.getFieldQuery(field, queryText);
      }
    };

    TermQuery tq;

    tq = (TermQuery) qp.parse("foo:zoo*");
    assertEquals("zoo", tq.getTerm().text());
    assertEquals(2, type[0]);

    tq = (TermQuery) qp.parse("foo:zoo*^2");
    assertEquals("zoo", tq.getTerm().text());
    assertEquals(2, type[0]);
    assertEquals(2, tq.getBoost(), 0);

    tq = (TermQuery) qp.parse("foo:*");
    assertEquals("*", tq.getTerm().text());
    assertEquals(1, type[0]); // could be a valid prefix query in the future too

    tq = (TermQuery) qp.parse("foo:*^2");
    assertEquals("*", tq.getTerm().text());
    assertEquals(1, type[0]);
    assertEquals(2, tq.getBoost(), 0);

    tq = (TermQuery) qp.parse("*:foo");
    assertEquals("*", tq.getTerm().field());
    assertEquals("foo", tq.getTerm().text());
    assertEquals(3, type[0]);

    tq = (TermQuery) qp.parse("*:*");
    assertEquals("*", tq.getTerm().field());
    assertEquals("*", tq.getTerm().text());
    assertEquals(1, type[0]); // could be handled as a prefix query in the future

    tq = (TermQuery) qp.parse("(*:*)");
    assertEquals("*", tq.getTerm().field());
    assertEquals("*", tq.getTerm().text());
    assertEquals(1, type[0]);

  }
    // ... (excerpt truncated)
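The overrides above exist to bypass QueryParser's usual error checking, which rejects a leading '*' in a wildcard term. Without subclassing, the stock parser can be told to accept such terms directly; a sketch against the same 2.9-era API:

    QueryParser stock = new QueryParser(Version.LUCENE_CURRENT, "field",
        new WhitespaceAnalyzer());
    // without this, a term with a leading '*' makes parse() throw
    // ParseException
    stock.setAllowLeadingWildcard(true);
    Query wc = stock.parse("foo:*oo");  // now a WildcardQuery on "*oo"
    Query pq = stock.parse("foo:zoo*"); // a PrefixQuery, always allowed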

      Term searchTerm = new Term("content", "aaa");       
      IndexReader reader = IndexReader.open(startDir, true);
      assertEquals("first docFreq", 57, reader.docFreq(searchTerm));

      IndexSearcher searcher = new IndexSearcher(reader);
      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("first number of hits", 57, hits.length);
      searcher.close();
      reader.close();

      // Iterate with larger and larger amounts of free
      // disk space.  With little free disk space,
      // addIndexesNoOptimize will certainly run out of space &
      // fail.  Verify that when this happens, index is
      // not corrupt and index in fact has added no
      // documents.  Then, we increase disk space by 2000
      // bytes each iteration.  At some point there is
      // enough free disk space and addIndexesNoOptimize should
      // succeed and index should show all documents were
      // added.

      // String[] files = startDir.listAll();
      long diskUsage = startDir.sizeInBytes();

      long startDiskUsage = 0;
      String[] files = startDir.listAll();
      for(int i=0;i<files.length;i++) {
        startDiskUsage += startDir.fileLength(files[i]);
      }

      for(int iter=0;iter<3;iter++) {

        if (debug)
          System.out.println("TEST: iter=" + iter);

        // Start with 100 bytes more than we are currently using:
        long diskFree = diskUsage+100;

        int method = iter;

        boolean success = false;
        boolean done = false;

        String methodName;
        if (0 == method) {
          methodName = "addIndexes(Directory[]) + optimize()";
        } else if (1 == method) {
          methodName = "addIndexes(IndexReader[])";
        } else {
          methodName = "addIndexesNoOptimize(Directory[])";
        }

        while(!done) {

          // Make a new dir that will enforce disk usage:
          MockRAMDirectory dir = new MockRAMDirectory(startDir);
          writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
          IOException err = null;

          MergeScheduler ms = writer.getMergeScheduler();
          for(int x=0;x<2;x++) {
            if (ms instanceof ConcurrentMergeScheduler) {
              // This test intentionally produces exceptions
              // in the threads that CMS launches; we don't
              // want to pollute test output with these.
              if (0 == x)
                ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
              else
                ((ConcurrentMergeScheduler) ms).clearSuppressExceptions();
            }

            // Two loops: first time, limit disk space &
            // throw random IOExceptions; second time, no
            // disk space limit:

            double rate = 0.05;
            double diskRatio = ((double) diskFree)/diskUsage;
            long thisDiskFree;

            String testName = null;

            if (0 == x) {
              thisDiskFree = diskFree;
              if (diskRatio >= 2.0) {
                rate /= 2;
              }
              if (diskRatio >= 4.0) {
                rate /= 2;
              }
              if (diskRatio >= 6.0) {
                rate = 0.0;
              }
              if (debug)
                testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
            } else {
              thisDiskFree = 0;
              rate = 0.0;
              if (debug)
                testName = "disk full test " + methodName + " with unlimited disk space";
            }

            if (debug)
              System.out.println("\ncycle: " + testName);

            dir.setMaxSizeInBytes(thisDiskFree);
            dir.setRandomIOExceptionRate(rate, diskFree);

            try {

              if (0 == method) {
                writer.addIndexesNoOptimize(dirs);
                writer.optimize();
              } else if (1 == method) {
                IndexReader readers[] = new IndexReader[dirs.length];
                for(int i=0;i<dirs.length;i++) {
                  readers[i] = IndexReader.open(dirs[i], true);
                }
                try {
                  writer.addIndexes(readers);
                } finally {
                  for(int i=0;i<dirs.length;i++) {
                    readers[i].close();
                  }
                }
              } else {
                writer.addIndexesNoOptimize(dirs);
              }

              success = true;
              if (debug) {
                System.out.println("  success!");
              }

              if (0 == x) {
                done = true;
              }

            } catch (IOException e) {
              success = false;
              err = e;
              if (debug) {
                System.out.println("  hit IOException: " + e);
                e.printStackTrace(System.out);
              }

              if (1 == x) {
                e.printStackTrace(System.out);
                fail(methodName + " hit IOException after disk space was freed up");
              }
            }

            // Make sure all threads from
            // ConcurrentMergeScheduler are done
            _TestUtil.syncConcurrentMerges(writer);

            if (debug) {
              System.out.println("  now test readers");
            }

            // Finally, verify index is not corrupt, and, if
            // we succeeded, we see all docs added, and if we
            // failed, we see either all docs or no docs added
            // (transactional semantics):
            try {
              reader = IndexReader.open(dir, true);
            } catch (IOException e) {
              e.printStackTrace(System.out);
              fail(testName + ": exception when creating IndexReader: " + e);
            }
            int result = reader.docFreq(searchTerm);
            if (success) {
              if (result != START_COUNT) {
                fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
              }
            } else {
              // On hitting exception we still may have added
              // all docs:
              if (result != START_COUNT && result != END_COUNT) {
                err.printStackTrace(System.out);
                fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
              }
            }

            searcher = new IndexSearcher(reader);
            try {
              hits = searcher.search(new TermQuery(searchTerm), null, END_COUNT).scoreDocs;
            } catch (IOException e) {
              e.printStackTrace(System.out);
              fail(testName + ": exception when searching: " + e);
            }
            int result2 = hits.length;
            // ... (excerpt truncated)
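Stripped of the disk-full plumbing, the contract this test exercises is that adding indexes is transactional: it either completes fully or leaves the index at its prior state, and the index opens cleanly either way. A condensed sketch (START_COUNT, END_COUNT, dirs and searchTerm as in the surrounding test):

    boolean ok = false;
    try {
      writer.addIndexesNoOptimize(dirs);
      ok = true;
    } catch (IOException e) {
      // simulated disk-full; any half-done work must be rolled back
    }
    IndexReader r = IndexReader.open(dir, true); // must open uncorrupted
    int freq = r.docFreq(searchTerm);
    if (ok) {
      assertEquals(START_COUNT, freq);
    } else {
      // on failure the add is all-or-nothing
      assertTrue(freq == START_COUNT || freq == END_COUNT);
    }
    r.close();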

        }
        writer.close();

        Term searchTerm = new Term("content", "aaa");       
        IndexSearcher searcher = new IndexSearcher(dir, false);
        ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        assertEquals("first number of hits", 14, hits.length);
        searcher.close();

        IndexReader reader = IndexReader.open(dir, true);

        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
        for(int i=0;i<3;i++) {
          for(int j=0;j<11;j++) {
            addDoc(writer);
          }
          searcher = new IndexSearcher(dir, false);
          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
          assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
          searcher.close();
          assertTrue("reader should have still been current", reader.isCurrent());
        }

        // Now, close the writer:
        writer.close();
        assertFalse("reader should not be current now", reader.isCurrent());

        searcher = new IndexSearcher(dir, false);
        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        assertEquals("reader did not see changes after writer was closed", 47, hits.length);
        searcher.close();
    }
    // ... (excerpt truncated)
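The point-in-time behaviour checked above has a practical counterpart: once isCurrent() returns false, the reader can be refreshed cheaply with reopen(), which shares the unchanged segments instead of re-opening everything; a sketch:

    if (!reader.isCurrent()) {
      IndexReader fresh = reader.reopen(); // shares unchanged segments
      if (fresh != reader) {
        reader.close(); // release the stale snapshot
        reader = fresh;
      }
    }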

      }
      writer.close();

      Term searchTerm = new Term("content", "aaa");       
      IndexSearcher searcher = new IndexSearcher(dir, false);
      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("first number of hits", 14, hits.length);
      searcher.close();

      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
      writer.setMaxBufferedDocs(10);
      for(int j=0;j<17;j++) {
        addDoc(writer);
      }
      // Delete all docs:
      writer.deleteDocuments(searchTerm);

      searcher = new IndexSearcher(dir, false);
      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
      searcher.close();

      // Now, roll back the writer:
      writer.rollback();

      assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");

      searcher = new IndexSearcher(dir, false);
      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("saw changes after writer.abort", 14, hits.length);
      searcher.close();
         
      // Now make sure we can re-open the index, add docs,
      // and all is good:
      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
      writer.setMaxBufferedDocs(10);

      // On abort, writer in fact may write to the same
      // segments_N file:
      dir.setPreventDoubleWrite(false);

      for(int i=0;i<12;i++) {
        for(int j=0;j<17;j++) {
          addDoc(writer);
        }
        searcher = new IndexSearcher(dir, false);
        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
        searcher.close();
      }

      writer.close();
      searcher = new IndexSearcher(dir, false);
      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
      assertEquals("didn't see changes after close", 218, hits.length);
      searcher.close();

      dir.close();
    }
    // ... (excerpt truncated)
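rollback() discards everything done since the writer was opened, the 17 added documents and the delete-all alike, and closes the writer, which is why the hit count stays at 14 throughout. A condensed sketch of the contract (addDoc and searchTerm as in the surrounding test):

    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false,
        IndexWriter.MaxFieldLength.LIMITED);
    addDoc(writer);                     // buffered, never committed
    writer.deleteDocuments(searchTerm); // likewise uncommitted
    writer.rollback();                  // discards both and closes the writer
    // the index is exactly as it was when the writer was opened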

        }
      }
      writer.close();

      IndexSearcher searcher = new IndexSearcher(dir, false);
      ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")), null, 1000).scoreDocs;
      assertEquals(300, hits.length);
      searcher.close();

      dir.close();
    }
    // ... (excerpt truncated)
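For completeness, a minimal end-to-end TermQuery round trip against the same 2.9-era API used throughout (field values and the assertion are illustrative):

    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
        true, IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("field", "aaa bbb", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();

    IndexSearcher searcher = new IndexSearcher(dir, true);
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("field", "aaa")),
        null, 10).scoreDocs;
    assertEquals(1, hits.length); // TermQuery matches the exact indexed token
    searcher.close();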
