/**
 * Stress test: randomly injects {@code FakeIOException} while live-docs or
 * doc-values updates are being written to disk, then verifies that no
 * buffered deletes or numeric doc-values updates were silently lost.
 * <p>
 * Invariants checked after every iteration and once more at the end:
 * <ul>
 *   <li>{@code reader.numDocs() == docCount - deleteCount} (no lost deletes)</li>
 *   <li>for every live doc, {@code cf == f * 2} — the two fields are always
 *       updated together (L96/L97), so a torn update would break this</li>
 * </ul>
 * Exceptions are deliberately never thrown while a flushed segment is being
 * sealed, because an abort there legitimately drops documents and would make
 * the bookkeeping above invalid.
 */
public void testNoLostDeletesOrUpdates() throws Exception {
// Expected-state bookkeeping, maintained across all iterations:
int deleteCount = 0;   // total docs we asked to delete
int docBase = 0;       // first "id" value used by the current iteration's docs
int docCount = 0;      // total docs ever added (deletes are NOT subtracted here)
MockDirectoryWrapper dir = newMockDirectory();
// Gate for the failure injector below; armed/disarmed around the risky section.
final AtomicBoolean shouldFail = new AtomicBoolean();
dir.failOn(new MockDirectoryWrapper.Failure() {
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
StackTraceElement[] trace = new Exception().getStackTrace();
// Only inject failures while the test has armed us:
if (shouldFail.get() == false) {
return;
}
// Scan the current stack: we want to fail inside writeLiveDocs /
// writeFieldUpdates, but never while sealFlushedSegment is on the
// stack (failing there aborts the whole segment and docs are
// legitimately lost, which would break the test's accounting).
boolean sawSeal = false;
boolean sawWrite = false;
for (int i = 0; i < trace.length; i++) {
if ("sealFlushedSegment".equals(trace[i].getMethodName())) {
sawSeal = true;
break;
}
if ("writeLiveDocs".equals(trace[i].getMethodName()) || "writeFieldUpdates".equals(trace[i].getMethodName())) {
sawWrite = true;
}
}
// Don't throw exc if we are "flushing", else
// the segment is aborted and docs are lost:
if (sawWrite && sawSeal == false && random().nextInt(3) == 2) {
// Only sometimes throw the exc, so we get
// it sometimes on creating the file, on
// flushing buffer, on closing the file:
if (VERBOSE) {
System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
new Throwable().printStackTrace(System.out);
}
// One-shot: disarm before throwing so at most one exception fires
// per armed window.
shouldFail.set(false);
throw new FakeIOException();
}
}
});
// Writer is lazily (re)created; it may be closed mid-iteration by the
// failure path or randomly at the end of an iteration.
RandomIndexWriter w = null;
for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
int numDocs = atLeast(100);
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter + " numDocs=" + numDocs + " docBase=" + docBase + " delCount=" + deleteCount);
}
if (w == null) {
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
final MergeScheduler ms = iwc.getMergeScheduler();
// Merge threads can also hit the injected FakeIOException; wrap CMS so
// only that exception is swallowed (anything else still fails the test).
if (ms instanceof ConcurrentMergeScheduler) {
final ConcurrentMergeScheduler suppressFakeIOE = new ConcurrentMergeScheduler() {
@Override
protected void handleMergeException(Throwable exc) {
// suppress only FakeIOException:
if (!(exc instanceof FakeIOException)) {
super.handleMergeException(exc);
}
}
};
// Preserve the randomized CMS settings on the replacement scheduler:
final ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) ms;
suppressFakeIOE.setMaxMergesAndThreads(cms.getMaxMergeCount(), cms.getMaxThreadCount());
suppressFakeIOE.setMergeThreadPriority(cms.getMergeThreadPriority());
iwc.setMergeScheduler(suppressFakeIOE);
}
w = new RandomIndexWriter(random(), dir, iwc);
// Since we hit exc during merging, a partial
// forceMerge can easily return when there are still
// too many segments in the index:
w.setDoRandomForceMergeAssert(false);
}
// Index this iteration's docs: unique "id" term plus the f/cf doc-values
// pair (initialized so cf == 2*f holds from the start).
for(int i=0;i<numDocs;i++) {
Document doc = new Document();
doc.add(new StringField("id", ""+(docBase+i), Field.Store.NO));
if (defaultCodecSupportsDocValues()) {
doc.add(new NumericDocValuesField("f", 1L));
doc.add(new NumericDocValuesField("cf", 2L));
}
w.addDocument(doc);
}
docCount += numDocs;
// TODO: we could make the test more evil, by letting
// it throw more than one exc, randomly, before "recovering"
// TODO: we could also install an infoStream and try
// to fail in "more evil" places inside BDS
// Arm the failure injector for the delete/update + flush section below:
shouldFail.set(true);
boolean doClose = false;
try {
boolean defaultCodecSupportsFieldUpdates = defaultCodecSupportsFieldUpdates();
// Randomly update doc-values and/or delete ~10% of this iteration's docs.
for(int i=0;i<numDocs;i++) {
if (random().nextInt(10) == 7) {
boolean fieldUpdate = defaultCodecSupportsFieldUpdates && random().nextBoolean();
if (fieldUpdate) {
long value = iter;
if (VERBOSE) {
System.out.println(" update id=" + (docBase+i) + " to value " + value);
}
// Always update f and cf together so cf == 2*f stays invariant:
w.updateNumericDocValue(new Term("id", Integer.toString(docBase + i)), "f", value);
w.updateNumericDocValue(new Term("id", Integer.toString(docBase + i)), "cf", value * 2);
}
// sometimes do both deletes and updates
if (!fieldUpdate || random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" delete id=" + (docBase+i));
}
deleteCount++;
w.deleteDocuments(new Term("id", ""+(docBase+i)));
}
}
}
// Trigger writeLiveDocs so we hit fake exc:
IndexReader r = w.getReader(true);
// Sometimes we will make it here (we only randomly
// throw the exc):
assertEquals(docCount-deleteCount, r.numDocs());
r.close();
// Sometimes close, so the disk full happens on close:
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" now close writer");
}
// doClose is set BEFORE close(): if close() throws the injected
// exception, the second-close block below finishes the job.
doClose = true;
w.close();
w = null;
}
} catch (IOException ioe) {
// FakeIOException can be thrown from mergeMiddle, in which case IW
// registers it before our CMS gets to suppress it. IW.forceMerge later
// throws it as a wrapped IOE, so don't fail in this case.
if (ioe instanceof FakeIOException || (ioe.getCause() != null && ioe.getCause() instanceof FakeIOException)) {
// expected
if (VERBOSE) {
System.out.println("TEST: w.close() hit expected IOE");
}
} else {
throw ioe;
}
}
// Disarm the injector: everything from here on must succeed.
shouldFail.set(false);
IndexReader r;
// If an injected exception interrupted the close above, retry it now so
// the writer state is fully committed before verification:
if (doClose && w != null) {
if (VERBOSE) {
System.out.println(" now 2nd close writer");
}
w.close();
w = null;
}
if (w == null || random().nextBoolean()) {
// Open non-NRT reader, to make sure the "on
// disk" bits are good:
if (VERBOSE) {
System.out.println("TEST: verify against non-NRT reader");
}
if (w != null) {
w.commit();
}
r = DirectoryReader.open(dir);
} else {
if (VERBOSE) {
System.out.println("TEST: verify against NRT reader");
}
r = w.getReader();
}
// Core invariant #1: no deletes were lost.
assertEquals(docCount-deleteCount, r.numDocs());
if (defaultCodecSupportsDocValues()) {
// Core invariant #2: for every live doc, cf == 2*f (updates were
// applied atomically as a pair, never torn by the injected exception).
for (AtomicReaderContext context : r.leaves()) {
Bits liveDocs = context.reader().getLiveDocs();
NumericDocValues f = context.reader().getNumericDocValues("f");
NumericDocValues cf = context.reader().getNumericDocValues("cf");
for (int i = 0; i < context.reader().maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
assertEquals("doc=" + (docBase + i), cf.get(i), f.get(i) * 2);
}
}
}
}
r.close();
// Sometimes re-use RIW, other times open new one:
if (w != null && random().nextBoolean()) {
if (VERBOSE) {
System.out.println("TEST: close writer");
}
w.close();
w = null;
}
docBase += numDocs;
}
if (w != null) {
w.close();
}
// Final verify:
IndexReader r = DirectoryReader.open(dir);
assertEquals(docCount-deleteCount, r.numDocs());
r.close();
dir.close();
}