public void testBackupOwnerJoiningDuringRemoveWithPreviousValue() throws Exception {
doTest(Operation.REMOVE_EXACT);
}
private void doTest(final Operation op) throws Exception {
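// Scenario: a write is executed with cache0 as primary owner while a new node (cache2) joins and
// becomes a backup owner in the write CH; the command has to be applied on the joiner via the retry,
// and the test verifies the operation still returns the expected value.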
CheckPoint checkPoint = new CheckPoint();
LocalTopologyManager ltm0 = TestingUtil.extractGlobalComponent(manager(0), LocalTopologyManager.class);
int preJoinTopologyId = ltm0.getCacheTopology(CACHE_NAME).getTopologyId();
final AdvancedCache<Object, Object> cache0 = advancedCache(0);
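// Block topology updates newer than preJoinTopologyId on each node until the checkpoint triggers them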
addBlockingLocalTopologyManager(manager(0), checkPoint, preJoinTopologyId);
final AdvancedCache<Object, Object> cache1 = advancedCache(1);
addBlockingLocalTopologyManager(manager(1), checkPoint, preJoinTopologyId);
// Add a new member, but don't start the cache yet
ConfigurationBuilder c = getConfigurationBuilder();
c.clustering().stateTransfer().awaitInitialTransfer(false);
addClusterEnabledCacheManager(c);
addBlockingLocalTopologyManager(manager(2), checkPoint, preJoinTopologyId);
// Start the cache and wait until it's a member in the write CH
log.tracef("Starting the cache on the joiner");
final AdvancedCache<Object, Object> cache2 = advancedCache(2);
int duringJoinTopologyId = preJoinTopologyId + 1;
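// Unblock the topology update that adds the joiner to the write CH on all three nodes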
checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(0));
checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(1));
checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(2));
// Wait for the write CH to contain the joiner everywhere
eventually(new Condition() {
@Override
public boolean isSatisfied() throws Exception {
return cache0.getRpcManager().getMembers().size() == 3 &&
cache1.getRpcManager().getMembers().size() == 3 &&
cache2.getRpcManager().getMembers().size() == 3;
}
});
// Every GetKeyValueCommand (i.e. the remote get) will be blocked before returning on cache0
CyclicBarrier beforeCache0Barrier = new CyclicBarrier(2);
BlockingInterceptor blockingInterceptor0 = new BlockingInterceptor(beforeCache0Barrier,
GetKeyValueCommand.class, false);
cache0.addInterceptorBefore(blockingInterceptor0, StateTransferInterceptor.class);
// Every write command (op.getCommandClass()) will be blocked before returning on cache1
CyclicBarrier afterCache1Barrier = new CyclicBarrier(2);
BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(afterCache1Barrier,
op.getCommandClass(), false);
cache1.addInterceptorBefore(blockingInterceptor1, StateTransferInterceptor.class);
// Every write command (op.getCommandClass()) will be blocked before reaching the distribution interceptor on cache2
CyclicBarrier beforeCache2Barrier = new CyclicBarrier(2);
BlockingInterceptor blockingInterceptor2 = new BlockingInterceptor(beforeCache2Barrier,
op.getCommandClass(), true);
cache2.addInterceptorBefore(blockingInterceptor2, NonTxConcurrentDistributionInterceptor.class);
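// Pick a key for which cache0 is the primary owner; cache2 becomes a backup owner only after the join (see getKeyForCache2())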
final MagicKey key = getKeyForCache2();
// For operations that expect a previous value, store it in cache0 and cache1 beforehand
if (op.getPreviousValue() != null) {
cache0.withFlags(Flag.CACHE_MODE_LOCAL).put(key, op.getPreviousValue());
cache1.withFlags(Flag.CACHE_MODE_LOCAL).put(key, op.getPreviousValue());
}
// Execute the operation from cache0 with cache0 as primary owner; cache2 will become a backup owner for the retry
// The command will be blocked on cache1 and cache2.
Future<Object> future = fork(new Callable<Object>() {
@Override
public Object call() throws Exception {
switch (op) {
case PUT:
return cache0.put(key, op.getValue());
case PUT_IF_ABSENT:
return cache0.putIfAbsent(key, op.getValue());
case REPLACE:
return cache0.replace(key, op.getValue());
case REPLACE_EXACT:
return cache0.replace(key, op.getPreviousValue(), op.getValue());
case REMOVE:
return cache0.remove(key);
case REMOVE_EXACT:
return cache0.remove(key, op.getPreviousValue());
default:
throw new IllegalArgumentException("Unsupported operation: " + op);
}
}
});
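// Each CyclicBarrier has two parties: the test awaits twice per barrier, first to rendezvous with the
// blocked command in BlockingInterceptor and then to let it continue (assuming the interceptor awaits twice)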
// Wait for the command to be applied on cache1
afterCache1Barrier.await(10, TimeUnit.SECONDS);
afterCache1Barrier.await(10, TimeUnit.SECONDS);
// Allow the command to proceed on cache2
beforeCache2Barrier.await(10, TimeUnit.SECONDS);
beforeCache2Barrier.await(10, TimeUnit.SECONDS);
// Check that the operation didn't fail
Object result = future.get(10, TimeUnit.SECONDS);
assertEquals(op.getReturnValue(), result);
log.tracef("%s operation is done", op);
// Stop blocking get commands on cache0
// beforeCache0Barrier.await(10, TimeUnit.SECONDS);
// beforeCache0Barrier.await(10, TimeUnit.SECONDS);
cache0.removeInterceptor(BlockingInterceptor.class);
// Allow the rebalance to end
int postJoinTopologyId = duringJoinTopologyId + 1;
checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(0));
checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(1));
checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(2));
// Wait for the topology to change everywhere
TestingUtil.waitForRehashToComplete(cache0, cache1, cache2);
// Check the value on all the nodes