@Override
public boolean indexInfo(Value key, long pointer) throws TerminatedException {
String ngram = new String(key.getData(), NGramQNameKey.NGRAM_OFFSET, key.getLength() - NGramQNameKey.NGRAM_OFFSET, UTF_8);
VariableByteInput is;
try {
is = index.db.getAsStream(pointer);
//Does the token already have data in the index?
if (is == null)
return true;
while (is.available() > 0) {
int storedDocId = is.readInt();
is.readByte();
int occurrences = is.readInt();
//Read (variable) length of node IDs + frequency + offsets
int length = is.readFixedInt();
DocumentImpl storedDocument = docs.getDoc(storedDocId);
//Skip this entry if the document is not part of the input document set
if (storedDocument == null) {
is.skipBytes(length);
continue;
}
NodeId previous = null;
for (int m = 0; m < occurrences; m++) {
NodeId nodeId = index.getBrokerPool().getNodeFactory().createFromStream(previous, is);
previous = nodeId;
int freq = is.readInt();
NodeProxy storedNode = new NodeProxy(storedDocument, nodeId);
// if a context set is specified, we can directly check if the
// matching node is a descendant of one of the nodes
// in the context set.
if (contextSet != null) {
int sizeHint = contextSet.getSizeHint(storedDocument);
if (returnAncestor) {
NodeProxy parentNode = contextSet.parentWithChild(storedNode, false, true, NodeProxy.UNKNOWN_NODE_LEVEL);
if (parentNode != null) {
readMatches(ngram, is, nodeId, freq, parentNode);
resultSet.add(parentNode, sizeHint);
} else
is.skip(freq);
} else {
readMatches(ngram, is, nodeId, freq, storedNode);
resultSet.add(storedNode, sizeHint);
}
// otherwise, we add all text nodes without check