Package org.apache.lucene.util

Examples of org.apache.lucene.util.PriorityQueue


    /**
     * @see #retrieveInterestingTerms(java.io.Reader)
     */
    public String[] retrieveInterestingTerms(int docNum) throws IOException {
        ArrayList al = new ArrayList(maxQueryTerms);
        PriorityQueue pq = retrieveTerms(docNum);
        Object cur;
        // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
        int lim = maxQueryTerms;
        // we just want to return the top words
        while (((cur = pq.pop()) != null) && lim-- > 0) {
            Object[] ar = (Object[]) cur;
            al.add(ar[0]); // the 1st entry is the interesting word
        }
        String[] res = new String[al.size()];
        return (String[]) al.toArray(res);
    }


    /**
     * @see #retrieveTerms(java.io.Reader)
     * @see #setMaxQueryTerms
     */
    public String[] retrieveInterestingTerms(Reader r) throws IOException {
        ArrayList al = new ArrayList(maxQueryTerms);
        PriorityQueue pq = retrieveTerms(r);
        Object cur;
        // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
        int lim = maxQueryTerms;
        // we just want to return the top words
        while (((cur = pq.pop()) != null) && lim-- > 0) {
            Object[] ar = (Object[]) cur;
            al.add(ar[0]); // the 1st entry is the interesting word
        }
        String[] res = new String[al.size()];
        return (String[]) al.toArray(res);
    }

    /**
     * @see #retrieveInterestingTerms(java.io.Reader)
     */
    public String[] retrieveInterestingTerms(int docNum) throws IOException {
        List<String> al = new ArrayList<String>(maxQueryTerms);
        PriorityQueue pq = retrieveTerms(docNum);
        Object cur;
        int lim = maxQueryTerms; // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
        // we just want to return the top words
        while (((cur = pq.pop()) != null) && lim-- > 0) {
            Object[] ar = (Object[]) cur;
            al.add((String) ar[0]); // the 1st entry is the interesting word
        }
        return al.toArray(new String[al.size()]);
    }

    /**
     * @see #retrieveTerms(java.io.Reader)
     * @see #setMaxQueryTerms
     */
    public String[] retrieveInterestingTerms(Reader r) throws IOException {
        List<String> al = new ArrayList<String>(maxQueryTerms);
        PriorityQueue pq = retrieveTerms(r);
        Object cur;
        int lim = maxQueryTerms; // have to be careful, retrieveTerms returns all words but that's probably not useful to our caller...
        // we just want to return the top words
        while (((cur = pq.pop()) != null) && lim-- > 0) {
            Object[] ar = (Object[]) cur;
            al.add((String) ar[0]); // the 1st entry is the interesting word
        }
        return al.toArray(new String[al.size()]);
    }
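
All four retrieveInterestingTerms variants above consume the queue the same way: pop() entries until the queue is empty or maxQueryTerms entries have been taken, cast each entry to Object[], and read the interesting word out of slot 0. The sketch below is illustrative only. It assumes the older, pre-generics PriorityQueue API these snippets compile against (a subclass calls initialize(maxSize) in its constructor and implements lessThan(Object, Object)); the WordScoreQueue class and its Object[] layout (word at [0], score at [1]) are hypothetical, not what MoreLikeThis.retrieveTerms actually builds.

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.util.PriorityQueue;

// Illustrative sketch only: a pre-generics style subclass that orders
// Object[] entries by a float score stored at index 1.
class WordScoreQueue extends PriorityQueue {
    WordScoreQueue(int maxSize) {
        initialize(maxSize);
    }

    protected boolean lessThan(Object a, Object b) {
        float scoreA = ((Float) ((Object[]) a)[1]).floatValue();
        float scoreB = ((Float) ((Object[]) b)[1]).floatValue();
        return scoreA < scoreB; // the smallest score sits on top and pops first
    }

    public static void main(String[] args) {
        WordScoreQueue pq = new WordScoreQueue(10);
        pq.insert(new Object[]{"lucene", Float.valueOf(0.9f)});
        pq.insert(new Object[]{"queue", Float.valueOf(0.4f)});

        // same drain idiom as the snippets above: pop() until empty or a limit is hit
        List words = new ArrayList();
        Object cur;
        int lim = 5;
        while ((cur = pq.pop()) != null && lim-- > 0) {
            words.add(((Object[]) cur)[0]);
        }
        System.out.println(words); // [queue, lucene]: lowest score pops first in this illustrative ordering
    }
}

Which end comes out of pop() first is decided entirely by lessThan(); the MoreLikeThis snippets rely on their queue being ordered so that the top words pop first, which is what the "we just want to return the top words" comment refers to.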

        if (queryResultProvider.includeFetch()) {
            // if we did both query and fetch in the same pass, we have already fetched all the docs from each shard, so use them...
            // this is also important since we shortcut and fetch only docs from "from" and up to "size"
            queueSize *= results.size();
        }
        PriorityQueue queue;
        if (queryResultProvider.queryResult().topDocs() instanceof TopFieldDocs) {
            // sorting: first, if the type is a String, change CUSTOM to STRING so we handle nulls properly (since our CUSTOM String sorting might return null)
            TopFieldDocs fieldDocs = (TopFieldDocs) queryResultProvider.queryResult().topDocs();
            for (int i = 0; i < fieldDocs.fields.length; i++) {
                boolean allValuesAreNull = true;
                boolean resolvedField = false;
                for (QuerySearchResultProvider resultProvider : results) {
                    for (ScoreDoc doc : resultProvider.queryResult().topDocs().scoreDocs) {
                        FieldDoc fDoc = (FieldDoc) doc;
                        if (fDoc.fields[i] != null) {
                            allValuesAreNull = false;
                            if (fDoc.fields[i] instanceof String) {
                                fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.STRING, fieldDocs.fields[i].getReverse());
                            }
                            resolvedField = true;
                            break;
                        }
                    }
                    if (resolvedField) {
                        break;
                    }
                }
                if (!resolvedField && allValuesAreNull && fieldDocs.fields[i].getField() != null) {
                    // we did not manage to resolve a field (and it's not score or doc, which have no field), and all the values are null (which can only happen for STRING), so make it a STRING
                    fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.STRING, fieldDocs.fields[i].getReverse());
                }
            }
            queue = new ShardFieldDocSortedHitQueue(fieldDocs.fields, queueSize);

            // we need to accumulate for all and then filter the from
            for (QuerySearchResultProvider resultProvider : results) {
                QuerySearchResult result = resultProvider.queryResult();
                ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
                totalNumDocs += scoreDocs.length;
                for (ScoreDoc doc : scoreDocs) {
                    ShardFieldDoc nodeFieldDoc = new ShardFieldDoc(result.shardTarget(), doc.doc, doc.score, ((FieldDoc) doc).fields);
                    if (queue.insertWithOverflow(nodeFieldDoc) == nodeFieldDoc) {
                        // filled the queue, break
                        break;
                    }
                }
            }
        } else {
            queue = new ScoreDocQueue(queueSize); // we need to accumulate for all and then filter the from
            for (QuerySearchResultProvider resultProvider : results) {
                QuerySearchResult result = resultProvider.queryResult();
                ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
                totalNumDocs += scoreDocs.length;
                for (ScoreDoc doc : scoreDocs) {
                    ShardScoreDoc nodeScoreDoc = new ShardScoreDoc(result.shardTarget(), doc.doc, doc.score);
                    if (queue.insertWithOverflow(nodeScoreDoc) == nodeScoreDoc) {
                        // filled the queue, break
                        break;
                    }
                }
            }

        }

        int resultDocsSize = queryResultProvider.queryResult().size();
        if (queryResultProvider.includeFetch()) {
            // if we did both query and fetch in the same pass, we have already fetched all the docs from each shard, so use them...
            resultDocsSize *= results.size();
        }
        if (totalNumDocs < queueSize) {
            resultDocsSize = totalNumDocs - queryResultProvider.queryResult().from();
        }

        if (resultDocsSize <= 0) {
            return EMPTY;
        }

        // we only pop the first resultDocsSize docs; this handles "from" nicely, since the top "from" docs
        // stay in the queue (pop() returns the lowest-ranked docs first), so we are actually popping the docs from "from" up to "size"
        ShardDoc[] shardDocs = new ShardDoc[resultDocsSize];
        for (int i = resultDocsSize - 1; i >= 0; i--)      // put docs in array
            shardDocs[i] = (ShardDoc) queue.pop();
        return shardDocs;
    }
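
The shard-merge code above bounds the queue with insertWithOverflow(): the call returns null while the queue still has room, and once the queue is full it returns whichever element fell off, which can be the element that was just offered. Since a shard's topDocs().scoreDocs arrive best-first, getting back the element just offered means no later doc from that shard can qualify either, which is why the inner loop breaks. A minimal, self-contained illustration of that contract; ScoreQueue and the float values are made up for the example, and the subclass style again assumes the older initialize()/lessThan() API:

import org.apache.lucene.util.PriorityQueue;

// Illustrative only: a tiny queue of boxed floats to show what
// insertWithOverflow() returns as the queue fills up.
class ScoreQueue extends PriorityQueue {
    ScoreQueue(int maxSize) {
        initialize(maxSize);
    }

    protected boolean lessThan(Object a, Object b) {
        return ((Float) a).floatValue() < ((Float) b).floatValue();
    }

    public static void main(String[] args) {
        ScoreQueue queue = new ScoreQueue(2);
        System.out.println(queue.insertWithOverflow(Float.valueOf(3f))); // null: queue had room
        System.out.println(queue.insertWithOverflow(Float.valueOf(5f))); // null: queue had room
        System.out.println(queue.insertWithOverflow(Float.valueOf(1f))); // 1.0: rejected, worse than everything kept
        System.out.println(queue.insertWithOverflow(Float.valueOf(9f))); // 3.0: the previous minimum was displaced
    }
}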

            // nothing to highlight
            return createDefaultExcerpt(text, excerptStart, excerptEnd,
                    fragmentStart, fragmentEnd, surround * 2);
        }

        PriorityQueue bestFragments = new FragmentInfoPriorityQueue(maxFragments);
        for (int i = 0; i < offsets.length; i++) {
            if (offsets[i].getEndOffset() <= text.length()) {
                FragmentInfo fi = new FragmentInfo(offsets[i], surround * 2);
                for (int j = i + 1; j < offsets.length; j++) {
                    if (offsets[j].getEndOffset() > text.length()) {
                        break;
                    }
                    if (!fi.add(offsets[j], text)) {
                        break;
                    }
                }
                bestFragments.insert(fi);
            }
        }

        if (bestFragments.size() == 0) {
            return createDefaultExcerpt(text, excerptStart, excerptEnd,
                    fragmentStart, fragmentEnd, surround * 2);
        }

        // retrieve fragment infos from queue and fill into list, least
        // fragment comes out first
        List infos = new LinkedList();
        while (bestFragments.size() > 0) {
            FragmentInfo fi = (FragmentInfo) bestFragments.pop();
            infos.add(0, fi);
        }

        Map offsetInfos = new IdentityHashMap();
        // remove overlapping fragment infos

            // nothing to highlight
            return createDefaultExcerpt(text, excerptStart, excerptEnd,
                    fragmentStart, fragmentEnd, surround * 2);
        }

        PriorityQueue bestFragments = new FragmentInfoPriorityQueue(maxFragments);
        for (int i = 0; i < offsets.length; i++) {
            if (offsets[i].getEndOffset() <= text.length()) {
                FragmentInfo fi = new FragmentInfo(offsets[i], surround * 2);
                for (int j = i + 1; j < offsets.length; j++) {
                    if (offsets[j].getEndOffset() > text.length()) {
                        break;
                    }
                    if (!fi.add(offsets[j], text)) {
                        break;
                    }
                }
                bestFragments.insert(fi);
            }
        }

        if (bestFragments.size() == 0) {
            return createDefaultExcerpt(text, excerptStart, excerptEnd,
                    fragmentStart, fragmentEnd, surround * 2);
        }

        // retrieve fragment infos from queue and fill into list, least
        // fragment comes out first
        List<FragmentInfo> infos = new LinkedList<FragmentInfo>();
        while (bestFragments.size() > 0) {
            FragmentInfo fi = (FragmentInfo) bestFragments.pop();
            infos.add(0, fi);
        }

        Map<TermVectorOffsetInfo, Object> offsetInfos = new IdentityHashMap<TermVectorOffsetInfo, Object>();
        // remove overlapping fragment infos
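
Both highlighter variants above fill a fixed-size queue with insert() and then drain it with pop(). In the Lucene 2.x-era API this code appears to target, insert() keeps an element only if the queue still has room or the element beats the current least entry, so at most maxFragments candidates survive. pop() then returns the least element first, and prepending each popped element with add(0, fi) leaves the list ordered from greatest to least. A small stand-alone sketch of that idiom; IntQueue and its integer scores are placeholders for FragmentInfoPriorityQueue and FragmentInfo:

import java.util.LinkedList;
import java.util.List;

import org.apache.lucene.util.PriorityQueue;

// Illustrative only (pre-generics style subclass): Integer "scores" stand in
// for FragmentInfo objects.
class IntQueue extends PriorityQueue {
    IntQueue(int maxSize) {
        initialize(maxSize);
    }

    protected boolean lessThan(Object a, Object b) {
        return ((Integer) a).intValue() < ((Integer) b).intValue();
    }

    public static void main(String[] args) {
        IntQueue best = new IntQueue(3);
        int[] scores = {4, 1, 9, 7, 2};
        for (int i = 0; i < scores.length; i++) {
            best.insert(Integer.valueOf(scores[i])); // only the 3 largest survive: 4, 7, 9
        }

        // pop() yields the least element first; adding each popped element at
        // index 0 reverses that order, so the list ends up greatest-first
        List ordered = new LinkedList();
        while (best.size() > 0) {
            ordered.add(0, best.pop());
        }
        System.out.println(ordered); // [9, 7, 4]
    }
}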
