// Assumes the surrounding test fixture defines 'directory' (an empty
// temporary directory) and 'largeBinarySize' (a size in the megabyte
// range, large enough to dominate the total store size)
FileStore store = new FileStore(directory, 1, false);
SegmentWriter writer = store.getTracker().getWriter();
SegmentNodeState base = store.getHead();
SegmentNodeBuilder builder = base.builder();
byte[] data = new byte[largeBinarySize];
new Random().nextBytes(data);
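// Stream the binary into the store; the returned blob is a reference
// to the data segments just written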
SegmentBlob blob = writer.writeStream(new ByteArrayInputStream(data));
builder.setProperty("foo", blob);
builder.getNodeState(); // write the blob reference to the segment
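// Overwriting "foo" turns the large blob into garbage that is only
// reachable through older revisions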
builder.setProperty("foo", "bar");
SegmentNodeState head = builder.getNodeState();
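// Atomically move the head revision from 'base' to the new state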
assertTrue(store.setHead(base, head));
assertEquals("bar", store.getHead().getString("foo"));
store.close();
// First simulate the case where during compaction a reference to the
// older segments is added to a segment that the compactor is writing
store = new FileStore(directory, 1, false);
head = store.getHead();
assertTrue(store.size() > largeBinarySize);
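// The compactor reuses the store's own SegmentWriter, so the compacted
// content shares segments with any concurrent writes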
writer = store.getTracker().getWriter(); // re-acquire from the reopened store
Compactor compactor = new Compactor(writer);
SegmentNodeState compacted =
        compactor.compact(EmptyNodeState.EMPTY_NODE, head);
builder = head.builder();
builder.setChildNode("old", head); // reference to pre-compacted state
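// Serializing this builder writes the reference to the pre-compaction
// segments into the segment the compactor is currently writing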
builder.getNodeState();
assertTrue(store.setHead(head, compacted));
store.close();
// In this case the revision cleanup is unable to reclaim the old data
store = new FileStore(directory, 1, false);
assertTrue(store.size() > largeBinarySize);
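// The stray reference keeps the pre-compaction segments reachable,
// so cleanup cannot reclaim them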
store.cleanup();
assertTrue(store.size() > largeBinarySize);
store.close();
// Now repeat the same sequence, but give the compactor a dedicated
// SegmentWriter
store = new FileStore(directory, 1, false);
head = store.getHead();
assertTrue(store.size() > largeBinarySize);
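// A dedicated SegmentWriter isolates the compacted state in its own
// segments, free of references to the old generation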
writer = new SegmentWriter(store, store.getTracker());
compactor = new Compactor(writer);
compacted = compactor.compact(EmptyNodeState.EMPTY_NODE, head);
builder = head.builder();
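// The stray reference now goes through the store's default writer, so
// it no longer contaminates the segments the compactor is writing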
builder.setChildNode("old", head); // reference to pre-compacted state
builder.getNodeState();
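// Flush the compactor's writer so the compacted segments are persisted
// before the head is moved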
writer.flush();
assertTrue(store.setHead(head, compacted));
store.close();
// Revision cleanup is now able to reclaim the extra space (OAK-1932)
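// Minimal sketch of the final verification, mirroring the cleanup check
// above but with the size assertion inverted
store = new FileStore(directory, 1, false);
assertTrue(store.size() > largeBinarySize);
store.cleanup();
assertTrue(store.size() < largeBinarySize);
store.close();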