}
//clear out the entries for the next round
tvm.passage.clear();
}
}
// Build the response section that will hold the ranked answers.
// NOTE(review): raw NamedList — presumably NamedList<Object>, matching Solr's untyped response style; confirm against the rest of the file.
NamedList qaResp = new NamedList();
rb.rsp.add("qaResponse", qaResp);
// Number of answers to return; defaults to 5 when the QA_ROWS request param is absent.
int rows = params.getInt(QA_ROWS, 5);
// The schema's unique key field gives each answer a stable document id;
// when the schema has none, we fall back to the internal Lucene doc id below.
SchemaField uniqField = rb.req.getSchema().getUniqueKeyField();
if (rankedPassages.size() > 0) {
// Emit at most 'rows' answers, bounded by how many passages were actually ranked.
int size = Math.min(rows, rankedPassages.size());
Set<String> fields = new HashSet<String>();
// Pop 'size' passages off the ranked queue; the descending index only counts
// iterations, it does not reorder results.
// NOTE(review): output order therefore depends on the queue's pop() semantics —
// confirm it yields best-first.
for (int i = size - 1; i >= 0; i--) {
Passage passage = rankedPassages.pop();
if (passage != null) {
// One NamedList per answer: document id, source field, and a text window.
NamedList passNL = new NamedList();
qaResp.add(("answer"), passNL);
String idName;
String idValue;
if (uniqField != null) {
idName = uniqField.getName();
fields.add(idName);
fields.add(passage.field);//prefetch this now, so that it is cached
// Fetch the stored unique key value for this Lucene doc.
idValue = searcher.doc(passage.lDocId, fields).get(idName);
} else {
// No unique key in the schema: expose the internal Lucene doc id instead.
idName = "luceneDocId";
idValue = String.valueOf(passage.lDocId);
}
passNL.add(idName, idValue);
passNL.add("field", passage.field);
//get the window
// Re-read the stored field value so the answer window can be cut from it.
String fldValue = searcher.doc(passage.lDocId, fields).get(passage.field);
if (fldValue != null) {
// Build the display window from the matched terms' character offsets;
// the passage window itself is term-vector based and cannot be used here.
int start = passage.terms.first().start;//use the offsets
int end = passage.terms.last().end;
if (start >= 0 && start < fldValue.length() &&
end >= 0 && end < fldValue.length()) {
// BUGFIX: 'end + term.length()' can exceed the field length even though
// 'end' itself passed the guard above, which made substring() throw
// StringIndexOutOfBoundsException. Clamp the upper bound to the field length.
int windowEnd = Math.min(fldValue.length(), end + passage.terms.last().term.length());
passNL.add("window", fldValue.substring(start, windowEnd));
} else {
log.debug("Passage does not have correct offset information");
passNL.add("window", fldValue);//we don't have offsets, or they are incorrect, return the whole field value
}
}
} else {
// pop() returned null: the queue is exhausted early, stop emitting answers.
break;
}