if (typeBuilder == null) {
typeBuilder = new TypeBuilder(reader);
readerBuilder.put(StringHelper.intern(uid.type()), typeBuilder);
}
BytesWrap idAsBytes = checkIfCanReuse(builders, new BytesWrap(uid.id()));
termDocs.seek(termEnum);
while (termDocs.next()) {
// when traversing, make sure to ignore deleted docs, so the key->docId mapping stays correct
if (!reader.isDeleted(termDocs.doc())) {
typeBuilder.idToDoc.put(idAsBytes, termDocs.doc());
}
}
} while (termEnum.next());
} finally {
termDocs.close();
termEnum.close();
}
}
// now, go and load the docId->parentId map
for (IndexReader reader : readers) {
if (idReaders.containsKey(reader.getCoreCacheKey())) {
// already loaded for this reader, no need to rebuild
continue;
}
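// per-type builders created for this reader during the id->docId pass above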
Map<String, TypeBuilder> readerBuilder = builders.get(reader.getCoreCacheKey());
String field = StringHelper.intern(ParentFieldMapper.NAME);
TermDocs termDocs = reader.termDocs();
TermEnum termEnum = reader.terms(new Term(field));
try {
do {
Term term = termEnum.term();
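// field names are interned (see above), so the identity comparison (!=) below is intentional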
if (term == null || term.field() != field) break;
// TODO we can optimize this, since the type is the prefix and we get the terms ordered,
// so we only need to move to the next type once it's different
Uid uid = Uid.createUid(term.text());
TypeBuilder typeBuilder = readerBuilder.get(uid.type());
if (typeBuilder == null) {
typeBuilder = new TypeBuilder(reader);
readerBuilder.put(StringHelper.intern(uid.type()), typeBuilder);
}
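// reuse an existing BytesWrap for this id if one was built earlier (what checkIfCanReuse appears to do), so equal ids share a single instance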
BytesWrap idAsBytes = checkIfCanReuse(builders, new BytesWrap(uid.id()));
boolean added = false; // lets us skip ids whose docs have all been deleted
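// position termDocs on the postings of the term the enum currently points to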
termDocs.seek(termEnum);
while (termDocs.next()) {
// ignore deleted docs here as well