if (queryResultProvider.includeFetch()) {
// if we did both query and fetch in the same pass, we already have all the docs fetched from each shard, so use them...
// this is also important since we shortcut and fetch only docs from "from" up to "size"
queueSize *= results.size();
}
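// merge the per-shard top docs into one priority queue, choosing the queue type based on
// whether the results are sorted by fields or by relevance score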
PriorityQueue queue;
if (queryResultProvider.queryResult().topDocs() instanceof TopFieldDocs) {
// sorting: if the value type is a String, change CUSTOM to STRING so we handle nulls properly (since our CUSTOM String sorting might return null)
TopFieldDocs fieldDocs = (TopFieldDocs) queryResultProvider.queryResult().topDocs();
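// for each sort field, scan the shard hits for a non-null value to resolve the actual value type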
for (int i = 0; i < fieldDocs.fields.length; i++) {
boolean allValuesAreNull = true;
boolean resolvedField = false;
for (QuerySearchResultProvider resultProvider : results) {
for (ScoreDoc doc : resultProvider.queryResult().topDocs().scoreDocs) {
FieldDoc fDoc = (FieldDoc) doc;
if (fDoc.fields[i] != null) {
allValuesAreNull = false;
if (fDoc.fields[i] instanceof String) {
fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.STRING, fieldDocs.fields[i].getReverse());
}
resolvedField = true;
break;
}
}
if (resolvedField) {
break;
}
}
if (!resolvedField && allValuesAreNull && fieldDocs.fields[i].getField() != null) {
// we did not manage to resolve the field type (and it's not score or doc, which have no field), and all the values are null (which can only happen for STRING), so make it a STRING
fieldDocs.fields[i] = new SortField(fieldDocs.fields[i].getField(), SortField.STRING, fieldDocs.fields[i].getReverse());
}
}
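// the (possibly rewritten) sort fields now drive the cross-shard merge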
queue = new ShardFieldDocSortedHitQueue(fieldDocs.fields, queueSize);
// we need to accumulate hits from all shards and only then apply the "from" offset
for (QuerySearchResultProvider resultProvider : results) {
QuerySearchResult result = resultProvider.queryResult();
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
totalNumDocs += scoreDocs.length;
for (ScoreDoc doc : scoreDocs) {
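// wrap each hit with its shard target so we know which shard it came from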
ShardFieldDoc nodeFieldDoc = new ShardFieldDoc(result.shardTarget(), doc.doc, doc.score, ((FieldDoc) doc).fields);
if (queue.insertWithOverflow(nodeFieldDoc) == nodeFieldDoc) {
// the doc did not make it into the queue; since this shard's hits are sorted, no later hit from it will either, so break
break;
}
}
}
} else {
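// no sort fields: merge purely by relevance score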
queue = new ScoreDocQueue(queueSize); // we need to accumulate hits from all shards and only then apply the "from" offset
for (QuerySearchResultProvider resultProvider : results) {
QuerySearchResult result = resultProvider.queryResult();
ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
totalNumDocs += scoreDocs.length;
for (ScoreDoc doc : scoreDocs) {
ShardScoreDoc nodeScoreDoc = new ShardScoreDoc(result.shardTarget(), doc.doc, doc.score);
if (queue.insertWithOverflow(nodeScoreDoc) == nodeScoreDoc) {
// the doc did not make it into the queue; since this shard's hits are sorted by score, no later hit from it will either, so break
break;
}
}
}
}
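// figure out how many docs we actually need to return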
int resultDocsSize = queryResultProvider.queryResult().size();
if (queryResultProvider.includeFetch()) {
// if we did both query and fetch in the same pass, we already have all the docs fetched from each shard, so use them...
resultDocsSize *= results.size();
}
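// if the shards returned fewer docs than the queue can hold, clamp to what is actually available past "from"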
if (totalNumDocs < queueSize) {
resultDocsSize = totalNumDocs - queryResultProvider.queryResult().from();
}
if (resultDocsSize <= 0) {
return EMPTY;
}
// we only pop resultDocsSize docs; this handles "from" nicely since the queue pops the lowest-ranked docs first,
// so the top "from" docs are never popped and we end up with exactly the docs from "from" up to "from" + "size"
ShardDoc[] shardDocs = new ShardDoc[resultDocsSize];
for (int i = resultDocsSize - 1; i >= 0; i--) { // put docs in array; the best hit ends up at index 0
shardDocs[i] = (ShardDoc) queue.pop();
}
return shardDocs;
}