// For each source segment of the merge: carry any deletes and numeric
// field updates that arrived AFTER the merge started over to the merged
// segment, remapping each surviving doc's old docID to its merged docID
// via docMap. docUpto tracks the running merged docID across segments.
for (int i = 0; i < sourceSegments.size(); i++) {
SegmentCommitInfo info = sourceSegments.get(i);
// Track the smallest buffered-deletes generation across all source segments.
minGen = Math.min(info.getBufferedDeletesGen(), minGen);
final int docCount = info.info.getDocCount();
// Live docs as of when the merge STARTED (snapshot held by the merge reader):
final Bits prevLiveDocs = merge.readers.get(i).getLiveDocs();
final ReadersAndUpdates rld = readerPool.get(info, false);
// We hold a ref so it should still be in the pool:
assert rld != null: "seg=" + info.info.name;
// Live docs NOW -- may differ from prevLiveDocs if deletes were flushed
// while the merge was running (copy-on-write, so identity compare works below):
final Bits currentLiveDocs = rld.getLiveDocs();
// Numeric DocValues updates that arrived against this segment during the merge:
final Map<String,NumericFieldUpdates> mergingFieldUpdates = rld.getMergingFieldUpdates();
final String[] mergingFields;
final UpdatesIterator[] updatesIters;
if (mergingFieldUpdates.isEmpty()) {
mergingFields = null;
updatesIters = null;
} else {
// Flatten the map into parallel arrays so the per-doc loops below can
// walk all field iterators in lockstep by index:
mergingFields = new String[mergingFieldUpdates.size()];
updatesIters = new UpdatesIterator[mergingFieldUpdates.size()];
int idx = 0;
for (Entry<String,NumericFieldUpdates> e : mergingFieldUpdates.entrySet()) {
mergingFields[idx] = e.getKey();
updatesIters[idx] = e.getValue().getUpdates();
updatesIters[idx].nextDoc(); // advance to first update doc
++idx;
}
}
// System.out.println("[" + Thread.currentThread().getName() + "] IW.commitMergedDeletes: info=" + info + ", mergingUpdates=" + mergingUpdates);
if (prevLiveDocs != null) {
// If we had deletions on starting the merge we must
// still have deletions now:
assert currentLiveDocs != null;
assert prevLiveDocs.length() == docCount;
assert currentLiveDocs.length() == docCount;
// There were deletes on this segment when the merge
// started. The merge has collapsed away those
// deletes, but, if new deletes were flushed since
// the merge started, we must now carefully keep any
// newly flushed deletes but mapping them to the new
// docIDs.
// Since we copy-on-write, if any new deletes were
// applied after merging has started, we can just
// check if the before/after liveDocs have changed.
// If so, we must carefully merge the liveDocs one
// doc at a time:
if (currentLiveDocs != prevLiveDocs) {
// This means this segment received new deletes
// since we started the merge, so we
// must merge them:
for (int j = 0; j < docCount; j++) {
if (!prevLiveDocs.get(j)) {
// Doc was already deleted at merge start; deletes are never
// un-done, so it must still be deleted now:
assert !currentLiveDocs.get(j);
} else {
if (!currentLiveDocs.get(j)) {
// Doc was live at merge start but deleted since: record the
// delete against the MERGED segment, lazily acquiring the
// merged ReadersAndUpdates and its writable liveDocs:
if (mergedDeletesAndUpdates == null) {
mergedDeletesAndUpdates = readerPool.get(merge.info, true);
mergedDeletesAndUpdates.initWritableLiveDocs();
initWritableLiveDocs = true;
docMap = getDocMap(merge, mergeState);
} else if (!initWritableLiveDocs) { // mergedDeletes was initialized by field-updates changes
mergedDeletesAndUpdates.initWritableLiveDocs();
initWritableLiveDocs = true;
}
mergedDeletesAndUpdates.delete(docMap.map(docUpto));
if (mergingFields != null) { // advance all iters beyond the deleted document
skipDeletedDoc(updatesIters, j);
}
} else if (mergingFields != null) {
// document isn't deleted, check if any of the fields have an update to it
// NOTE(review): this per-doc update-carrying logic is duplicated
// below in the prevLiveDocs-unchanged branch; consider extracting
// a private helper to keep the two copies in sync.
int newDoc = -1;
for (int idx = 0; idx < mergingFields.length; idx++) {
UpdatesIterator updatesIter = updatesIters[idx];
if (updatesIter.doc() == j) { // document has an update
if (mergedDeletesAndUpdates == null) {
mergedDeletesAndUpdates = readerPool.get(merge.info, true);
docMap = getDocMap(merge, mergeState);
}
if (newDoc == -1) { // map once per all field updates, but only if there are any updates
newDoc = docMap.map(docUpto);
}
String field = mergingFields[idx];
NumericFieldUpdates fieldUpdates = mergedFieldUpdates.get(field);
if (fieldUpdates == null) {
// an approximation of maxDoc, used to compute best bitsPerValue
fieldUpdates = new NumericFieldUpdates.PackedNumericFieldUpdates(mergeState.segmentInfo.getDocCount());
mergedFieldUpdates.put(field, fieldUpdates);
}
// null update value means "unset" -- store the MISSING sentinel:
fieldUpdates.add(newDoc, updatesIter.value() == null ? NumericUpdate.MISSING : updatesIter.value());
updatesIter.nextDoc(); // advance to next document
} else {
// Iterators are kept positioned at or beyond the current doc:
assert updatesIter.doc() > j : "updateDoc=" + updatesIter.doc() + " curDoc=" + j;
}
}
}
// Count every doc that was live at merge start (it occupies a slot
// in the merged segment's docID space):
docUpto++;
}
}
} else if (mergingFields != null) {
// No new deletes since merge start, but there ARE field updates:
// need to check each non-deleted document if it has any updates
for (int j = 0; j < docCount; j++) {
if (prevLiveDocs.get(j)) {
// document isn't deleted, check if any of the fields have an update to it
int newDoc = -1;
for (int idx = 0; idx < mergingFields.length; idx++) {
UpdatesIterator updatesIter = updatesIters[idx];
if (updatesIter.doc() == j) { // document has an update
if (mergedDeletesAndUpdates == null) {
mergedDeletesAndUpdates = readerPool.get(merge.info, true);
docMap = getDocMap(merge, mergeState);
}
if (newDoc == -1) { // map once per all field updates, but only if there are any updates
newDoc = docMap.map(docUpto);
}
String field = mergingFields[idx];
NumericFieldUpdates fieldUpdates = mergedFieldUpdates.get(field);
if (fieldUpdates == null) {
// an approximation of maxDoc, used to compute best bitsPerValue
fieldUpdates = new NumericFieldUpdates.PackedNumericFieldUpdates(mergeState.segmentInfo.getDocCount());
mergedFieldUpdates.put(field, fieldUpdates);
}
fieldUpdates.add(newDoc, updatesIter.value() == null ? NumericUpdate.MISSING : updatesIter.value());
updatesIter.nextDoc(); // advance to next document
} else {
assert updatesIter.doc() > j : "updateDoc=" + updatesIter.doc() + " curDoc=" + j;
}
}
// advance docUpto for every non-deleted document
docUpto++;
} else {
// advance all iters beyond the deleted document
skipDeletedDoc(updatesIters, j);
}
}
} else {
// No new deletes and no field updates: skip the per-doc walk and just
// advance docUpto past every doc that survives into the merged segment
// (total minus committed deletes minus pending deletes):
docUpto += info.info.getDocCount() - info.getDelCount() - rld.getPendingDeleteCount();
}
} else if (currentLiveDocs != null) {
assert currentLiveDocs.length() == docCount;
// This segment had no deletes before but now it
// does:
for (int j = 0; j < docCount; j++) {
if (!currentLiveDocs.get(j)) {
// Doc deleted during the merge: lazily init merged-segment
// structures and record the remapped delete:
if (mergedDeletesAndUpdates == null) {
mergedDeletesAndUpdates = readerPool.get(merge.info, true);
mergedDeletesAndUpdates.initWritableLiveDocs();
initWritableLiveDocs = true;
docMap = getDocMap(merge, mergeState);