// Scenario 1: during compaction, a reference to the pre-compaction
// segments is added to a segment produced by the SAME SegmentWriter the
// compactor uses, entangling old and new data in one segment graph.
// First simulate the case where during compaction a reference to the
// older segments is added to a segment that the compactor is writing
store = new FileStore(directory, 1, false);
head = store.getHead();
// Precondition: the store still holds the large binary written earlier
// in this test, so its on-disk size exceeds largeBinarySize.
assertTrue(store.size() > largeBinarySize);
Compactor compactor = new Compactor(writer);
SegmentNodeState compacted =
compactor.compact(EmptyNodeState.EMPTY_NODE, head);
builder = head.builder();
builder.setChildNode("old", head); // reference to pre-compacted state
// NOTE(review): return value intentionally discarded — presumably called
// for its side effect of serializing the modified state (and with it the
// "old" reference) through the shared writer; confirm against
// NodeBuilder.getNodeState() semantics.
builder.getNodeState();
// Atomically swap the head to the compacted state; must succeed since
// no concurrent writer has moved the head.
assertTrue(store.setHead(head, compacted));
store.close();
// In this case the revision cleanup is unable to reclaim the old data
store = new FileStore(directory, 1, false);
assertTrue(store.size() > largeBinarySize);
store.cleanup();
// Size is still above largeBinarySize: the lingering reference from the
// compacted segment graph keeps the old segments alive through cleanup.
assertTrue(store.size() > largeBinarySize);
store.close();
// Scenario 2: identical sequence of operations, except the compactor
// gets its own dedicated SegmentWriter, so the old-state reference is
// not written into the segments the compactor produces.
// Now we do the same thing, but let the compactor use a different
// SegmentWriter
store = new FileStore(directory, 1, false);
head = store.getHead();
assertTrue(store.size() > largeBinarySize);
// Fresh writer bound to the reopened store; the compactor below writes
// exclusively through this one.
writer = new SegmentWriter(store, store.getTracker());
compactor = new Compactor(writer);
compacted = compactor.compact(EmptyNodeState.EMPTY_NODE, head);
builder = head.builder();
builder.setChildNode("old", head); // reference to pre-compacted state
builder.getNodeState();
// Explicit flush so the compacted segments are persisted before the
// head swap (scenario 1 relied on the store's pre-existing writer).
writer.flush();
assertTrue(store.setHead(head, compacted));