Package org.apache.solr.search

Examples of org.apache.solr.search.DocList in use. A DocList is a read-only slice of internal Lucene document ids returned by SolrIndexSearcher, carrying the slice offset, the total number of matches, and (optionally) scores. The snippets below show how Solr components and tests obtain a DocList and iterate it with a DocIterator.
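
Before the collected snippets, here is a minimal sketch of the pattern they all share: run a query, walk the resulting DocList with a DocIterator, and load the stored document for each id it yields. This is an illustrative sketch rather than code from any of the projects below; the method name, the query, and the field names are assumptions, while the SolrIndexSearcher, DocList, and DocIterator calls are the same ones exercised by the snippets.

    import java.io.IOException;
    import java.util.List;

    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.apache.solr.search.DocIterator;
    import org.apache.solr.search.DocList;
    import org.apache.solr.search.SolrIndexSearcher;

    public void iterateDocList(SolrIndexSearcher searcher) throws IOException {
      Query query = new TermQuery(new Term("title", "keyword")); // illustrative query

      // No filter queries, no sort; offset 0, up to 10 ids; GET_SCORES populates scores.
      DocList docs = searcher.getDocList(query, (List<Query>) null, null, 0, 10,
          SolrIndexSearcher.GET_SCORES);

      DocIterator iter = docs.iterator();
      while (iter.hasNext()) {
        int id = iter.nextDoc();             // internal Lucene doc id
        Document stored = searcher.doc(id);  // stored fields for that id
        float score = docs.hasScores() ? iter.score() : 0.0f;
        System.out.println(stored.get("title") + " score=" + score);
      }
      // docs.matches() is the total hit count; docs.size() is the length of this slice.
    }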


    // Build term vectors either for explicitly requested doc ids (TermVectorParams.DOC_IDS)
    // or, failing that, for every document in the query's result DocList.
    List<Integer> docIds = getInts(params.getParams(TermVectorParams.DOC_IDS));
    Iterator<Integer> iter;
    if (docIds != null && !docIds.isEmpty()) {
      iter = docIds.iterator();
    } else {
      DocList list = listAndSet.docList;
      iter = list.iterator();
    }
    SolrIndexSearcher searcher = rb.req.getSearcher();

    IndexReader reader = searcher.getReader();
    //the TVMapper is a TermVectorMapper which can be used to optimize loading of Term Vectors


        // MoreLikeThis: find the base match for the query, then build results from its first document
        boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE,
            true);
        int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
        // Find the base match
        Query query = QueryParsing.parseQuery(q, params.get(CommonParams.DF),
            params, req.getSchema());
        DocList match = searcher.getDocList(query, null, null, matchOffset, 1,
            flags); // only get the first one...
        if (includeMatch) {
          rsp.add("match", match);
        }

        // This is an iterator, but we only handle the first match
        DocIterator iterator = match.iterator();
        if (iterator.hasNext()) {
          // do a MoreLikeThis query for each document in results
          int id = iterator.nextDoc();
          mltDocs = mlt.getMoreLikeThis(id, start, rows, filters, interesting,
              flags);

        // Warm the new searcher by fetching the stored Document for each hit; only the
        // top-level values in the NamedList are checked for DocLists.
        NamedList values = rsp.getValues();
        for (int i=0; i<values.size(); i++) {
          Object o = values.getVal(i);
          if (o instanceof DocList) {
            DocList docs = (DocList)o;
            for (DocIterator iter = docs.iterator(); iter.hasNext();) {
              newSearcher.doc(iter.nextDoc());
            }
          }
        }

          responseWriter.endDocumentList();
        } else {
          responseWriter.writeAllDocs(info, list);
        }
      } else if (val instanceof DocList) {
        // Convert the DocList into SolrDocuments: either stream each document as it is built,
        // or collect them all and write the list in one call.
        DocList docList = (DocList) val;
        int sz = docList.size();
        IdxInfo idxInfo = new IdxInfo(request.getSchema(), request
            .getSearcher(), response.getReturnFields());
        DocListInfo info = new DocListInfo(docList.matches(), docList.size(), docList.offset(),
            docList.maxScore());
        DocIterator iterator = docList.iterator();
        if (responseWriter.isStreamingDocs()) {
          responseWriter.startDocumentList(name,info);
          for (int j = 0; j < sz; j++) {
            SolrDocument sdoc = getDoc(iterator.nextDoc(), idxInfo);
            if (idxInfo.includeScore && docList.hasScores()) {
              sdoc.addField(SCORE_FIELD, iterator.score());
            }
            responseWriter.writeDoc(sdoc);
          }
        } else {
          ArrayList<SolrDocument> list = new ArrayList<SolrDocument>(docList
              .size());
          for (int j = 0; j < sz; j++) {
            SolrDocument sdoc = getDoc(iterator.nextDoc(), idxInfo);
            if (idxInfo.includeScore && docList.hasScores()) {
              sdoc.addField(SCORE_FIELD, iterator.score());
            }
            list.add(sdoc);
          }
          responseWriter.writeAllDocs(info, list);
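
The DocListInfo above carries the DocList's metadata (matches, size, offset, maxScore) alongside the converted documents. When building a solrj SolrDocumentList directly, the same DocList fields map onto it as in this sketch, where docList stands for the DocList from the snippet above:

    import org.apache.solr.common.SolrDocumentList;

    SolrDocumentList out = new SolrDocumentList();
    out.setNumFound(docList.matches()); // total hits, not just the docs in this slice
    out.setStart(docList.offset());
    if (docList.hasScores()) {
      out.setMaxScore(docList.maxScore());
    }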

      int adjacentWindowSize = params.getInt(QAParams.ADJACENT_WINDOW_SIZE, DEFAULT_ADJACENT_WINDOW_SIZE);
      int secondaryWindowSize = params.getInt(QAParams.SECONDARY_WINDOW_SIZE, DEFAULT_SECONDARY_WINDOW_SIZE);
      WindowBuildingTVM tvm = new WindowBuildingTVM(primaryWindowSize, adjacentWindowSize, secondaryWindowSize);
      PassagePriorityQueue rankedPassages = new PassagePriorityQueue();
      // Intersect the spans with the result DocList: only build windows for documents
      // that are actually in the query results.
      DocList docList = rb.getResults().docList;
      while (spans.next()) {
        // build up the window
        if (docList.exists(spans.doc())) {
          tvm.spanStart = spans.start();
          tvm.spanEnd = spans.end();
          reader.getTermFreqVector(spans.doc(), sQuery.getField(), tvm);
          //The entries map contains the window, do some ranking of it
          if (!tvm.passage.terms.isEmpty()) {

    // Type-ahead suggestions: walk each DocList in the response and render the matching
    // "word" field values as an HTML list.
    NamedList nl = rsp.getValues();
    int sz = nl.size();
    for (int li = 0; li < sz; li++) {
      Object val = nl.getVal(li);
      if (val instanceof DocList) { //<co id="co.fuzzy.type-ahead.doclist"/>
        DocList dl = (DocList) val;
        DocIterator iterator = dl.iterator();
        w.append("<ul>\n");
        while (iterator.hasNext()) {
          int id = iterator.nextDoc();
          Document doc = searcher.doc(id, fields); //<co id="co.fuzzy.type-ahead.search"/>
          String name = doc.get("word");


      // Clustering: wrap a single doc id in a one-element DocSlice so the highlighter
      // can produce a summary snippet for it.
      String snippet = getValue(sdoc, snippetField);
      // TODO: docIds will be null when running distributed search.
      // See comment in ClusteringComponent#finishStage().
      if (produceSummary && docIds != null) {
        docsHolder[0] = docIds.get(sdoc).intValue();
        DocList docAsList = new DocSlice(0, 1, docsHolder, scores, 1, 1.0f);
        NamedList<Object> highlights = highlighter.doHighlighting(docAsList, theQuery, req, snippetFieldAry);
        if (highlights != null && highlights.size() == 1) {//should only be one value given our setup
          //should only be one document with one field
          @SuppressWarnings("unchecked")
          NamedList<String []> tmp = (NamedList<String[]>) highlights.getVal(0);
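
As the clustering snippet shows, a DocList can also be built by hand when a single document has to be passed through an API that expects one (here, the highlighter). DocSlice is the concrete implementation used for that; the sketch below is illustrative, with someInternalDocId standing in for a real Lucene doc id:

    import org.apache.solr.search.DocList;
    import org.apache.solr.search.DocSlice;

    // DocSlice(offset, len, docs, scores, matches, maxScore)
    int[] docsHolder = new int[] { someInternalDocId }; // hypothetical doc id
    float[] scores = new float[] { 1.0f };              // placeholder score
    DocList single = new DocSlice(0, 1, docsHolder, scores, 1, 1.0f);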

  
    // Fields requested in fl (here including test_hlt) are loaded eagerly, so they come
    // back as concrete Field instances rather than lazy fields.
    SolrQueryRequest req = req("q", "title:keyword", "fl", "id,title,test_hlt");
    SolrQueryResponse rsp = new SolrQueryResponse();
    core.execute(core.getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);

    DocList dl = (DocList) rsp.getValues().get("response");
    org.apache.lucene.document.Document d = req.getSearcher().doc(dl.iterator().nextDoc());
    // ensure field is not lazy, only works for Non-Numeric fields currently (if you change schema behind test, this may fail)
    assertTrue( d.getFieldable("test_hlt") instanceof Field );
    assertTrue( d.getFieldable("title") instanceof Field );
    req.close();
  }

   
    // test_hlt is not in fl, so with lazy field loading enabled it comes back as a lazy
    // field rather than a concrete Field, while the requested title field is a Field.
    SolrQueryRequest req = req("q", "title:keyword", "fl", "id,title");
    SolrQueryResponse rsp = new SolrQueryResponse();
    core.execute(core.getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);

    DocList dl = (DocList) rsp.getValues().get("response");
    DocIterator di = dl.iterator();   
    org.apache.lucene.document.Document d = req.getSearcher().doc(di.nextDoc());
    // ensure field is lazy
    assertTrue( !( d.getFieldable("test_hlt") instanceof Field ) );
    assertTrue( d.getFieldable("title") instanceof Field );
    req.close();
