Package org.infinispan.test.fwk

Examples of org.infinispan.test.fwk.CheckPoint
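CheckPoint (org.infinispan.test.fwk.CheckPoint) is a synchronization aid for Infinispan tests: test code and instrumented cache components coordinate through named events, one side calling trigger(eventName) while the other blocks in await/awaitStrict(eventName, timeout, unit) until that event fires; peek(timeout, unit, events...) is also used below to check whether an event has been triggered yet. The examples on this page all build on that handshake. As a minimal, self-contained sketch (the event names and the CheckPointExample class are made up for illustration; only the CheckPoint calls mirror the usage shown in the examples):

import java.util.concurrent.TimeUnit;
import org.infinispan.test.fwk.CheckPoint;

public class CheckPointExample {
   public static void main(String[] args) throws Exception {
      final CheckPoint checkPoint = new CheckPoint();

      // Simulates an instrumented cache component running on another thread.
      Thread component = new Thread(new Runnable() {
         @Override
         public void run() {
            try {
               checkPoint.trigger("component_reached_step_1");                      // announce progress
               checkPoint.awaitStrict("test_allows_step_2", 10, TimeUnit.SECONDS);  // park until released
            } catch (Exception e) {
               throw new RuntimeException(e);
            }
         }
      });
      component.start();

      // The test thread waits for the component to reach the checkpoint, then releases it.
      checkPoint.awaitStrict("component_reached_step_1", 10, TimeUnit.SECONDS);
      // ... assertions could run here while the component is parked ...
      checkPoint.trigger("test_allows_step_2");
      component.join();
   }
}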


      }

      int preJoinTopologyId = cache0.getComponentRegistry().getStateTransferManager().getCacheTopology().getTopologyId();

      // Block any state response commands on cache0
      CheckPoint checkPoint = new CheckPoint();
      ControlledRpcManager blockingRpcManager0 = blockStateResponseCommand(cache0);

      // Block the rebalance confirmation on cache0
      blockRebalanceConfirmation(manager(0), checkPoint);

      // Start the joiner
      log.tracef("Starting the cache on the joiner");
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);

      final AdvancedCache<Object,Object> cache1 = advancedCache(1);
      int rebalanceTopologyId = preJoinTopologyId + 1;

      // Wait for the write CH to contain the joiner everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return cache0.getRpcManager().getMembers().size() == 2 &&
                  cache1.getRpcManager().getMembers().size() == 2;
         }
      });

      // Every PutKeyValueCommand will be blocked before committing the entry on cache1
      CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(beforeCommitCache1Barrier,
            op.getCommandClass(), true);
      cache1.addInterceptorAfter(blockingInterceptor1, EntryWrappingInterceptor.class);

      // Wait for cache0 to collect the state to send to cache1 (including our previous value).
      blockingRpcManager0.waitForCommandToBlock();

      // Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 will become a backup owner for the retry
      // The put command will be blocked on cache1 just before committing the entry.
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return op.perform(cache0, key);
         }
      });

      // Wait for the entry to be wrapped on cache1
      beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

      // Allow the state to be applied on cache1 (writing the old value for our entry)
      blockingRpcManager0.stopBlocking();

      // Wait for cache1 to finish applying the state, but don't allow the rebalance confirmation to be processed.
      // (It would change the topology and it would trigger a retry for the command.)
      checkPoint.awaitStrict("pre_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(1), 10, SECONDS);

      // Now allow the command to commit on cache1
      beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

      // Wait for the command to finish and check that it didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);

      // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(0));
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(1));
      TestingUtil.waitForRehashToComplete(cache0, cache1);

      // Check the value on all the nodes
      assertEquals(op.getValue(), cache0.get(key));
      assertEquals(op.getValue(), cache1.get(key));
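The blockRebalanceConfirmation(...) helper used above is not shown on this page. Judging from the event names in the test, it parks each node's rebalance confirmation on the CheckPoint until the test releases it. A hypothetical sketch of that gate, assuming the usual java.util.concurrent imports (the method name, signature and timeout are assumptions; only the event names come from the test):

   // Hypothetical gate run when a node's rebalance confirmation for a topology reaches the coordinator.
   static void gateRebalanceConfirmation(CheckPoint checkPoint, int topologyId, Object confirmingNode)
         throws InterruptedException, TimeoutException {
      // Tell the test that the confirmation arrived from this node...
      checkPoint.trigger("pre_rebalance_confirmation_" + topologyId + "_from_" + confirmingNode);
      // ...and hold it until the test fires the matching resume event.
      checkPoint.awaitStrict("resume_rebalance_confirmation_" + topologyId + "_from_" + confirmingNode,
            10, TimeUnit.SECONDS);
   }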


   public void testBackupOwnerJoiningDuringRemoveWithPreviousValue() throws Exception {
      doTest(Operation.REMOVE_EXACT);
   }

   private void doTest(final Operation op) throws Exception {
      CheckPoint checkPoint = new CheckPoint();
      LocalTopologyManager ltm0 = TestingUtil.extractGlobalComponent(manager(0), LocalTopologyManager.class);
      int preJoinTopologyId = ltm0.getCacheTopology(CACHE_NAME).getTopologyId();

      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      addBlockingLocalTopologyManager(manager(0), checkPoint, preJoinTopologyId);

      final AdvancedCache<Object, Object> cache1 = advancedCache(1);
      addBlockingLocalTopologyManager(manager(1), checkPoint, preJoinTopologyId);

      // Add a new member, but don't start the cache yet
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);
      addBlockingLocalTopologyManager(manager(2), checkPoint, preJoinTopologyId);

      // Start the cache and wait until it's a member in the write CH
      log.tracef("Starting the cache on the joiner");
      final AdvancedCache<Object,Object> cache2 = advancedCache(2);
      int duringJoinTopologyId = preJoinTopologyId + 1;
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(2));

      // Wait for the write CH to contain the joiner everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return cache0.getRpcManager().getMembers().size() == 3 &&
                  cache1.getRpcManager().getMembers().size() == 3 &&
                  cache2.getRpcManager().getMembers().size() == 3;
         }
      });

      // Every GetKeyValueCommand will be blocked before returning on cache0
      CyclicBarrier beforeCache0Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor0 = new BlockingInterceptor(beforeCache0Barrier,
            GetKeyValueCommand.class, false);
      cache0.addInterceptorBefore(blockingInterceptor0, StateTransferInterceptor.class);

      // Every PutKeyValueCommand will be blocked before returning on cache1
      CyclicBarrier afterCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(afterCache1Barrier,
            op.getCommandClass(), false);
      cache1.addInterceptorBefore(blockingInterceptor1, StateTransferInterceptor.class);

      // Every PutKeyValueCommand will be blocked before reaching the distribution interceptor on cache2
      CyclicBarrier beforeCache2Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor2 = new BlockingInterceptor(beforeCache2Barrier,
            op.getCommandClass(), true);
      cache2.addInterceptorBefore(blockingInterceptor2, NonTxConcurrentDistributionInterceptor.class);


      final MagicKey key = getKeyForCache2();

      // Prepare for replace: put a previous value in cache0 and cache1
      if (op.getPreviousValue() != null) {
         cache0.withFlags(Flag.CACHE_MODE_LOCAL).put(key, op.getPreviousValue());
         cache1.withFlags(Flag.CACHE_MODE_LOCAL).put(key, op.getPreviousValue());
      }

      // Put from cache0 with cache0 as primary owner, cache2 will become a backup owner for the retry
      // The put command will be blocked on cache1 and cache2.
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            switch (op) {
               case PUT:
                  return cache0.put(key, op.getValue());
               case PUT_IF_ABSENT:
                  return cache0.putIfAbsent(key, op.getValue());
               case REPLACE:
                  return cache0.replace(key, op.getValue());
               case REPLACE_EXACT:
                  return cache0.replace(key, op.getPreviousValue(), op.getValue());
               case REMOVE:
                  return cache0.remove(key);
               case REMOVE_EXACT:
                  return cache0.remove(key, op.getPreviousValue());
               default:
                  throw new IllegalArgumentException("Unsupported operation: " + op);
            }
         }
      });

      // Wait for the value to be written on cache1
      afterCache1Barrier.await(10, TimeUnit.SECONDS);
      afterCache1Barrier.await(10, TimeUnit.SECONDS);

      // Allow the command to proceed on cache2
      beforeCache2Barrier.await(10, TimeUnit.SECONDS);
      beforeCache2Barrier.await(10, TimeUnit.SECONDS);

      // Check that the put command didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);

      // Stop blocking get commands on cache0
//      beforeCache0Barrier.await(10, TimeUnit.SECONDS);
//      beforeCache0Barrier.await(10, TimeUnit.SECONDS);
      cache0.removeInterceptor(BlockingInterceptor.class);

      // Allow the rebalance to end
      int postJoinTopologyId = duringJoinTopologyId + 1;
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(2));

      // Wait for the topology to change everywhere
      TestingUtil.waitForRehashToComplete(cache0, cache1, cache2);

      // Check the value on all the nodes
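addBlockingLocalTopologyManager(...) above is likewise a private helper. The "allow_topology_<topologyId>_on_<address>" events it reacts to suggest it simply delays every topology update newer than the pre-join one until the test triggers the matching event. A hypothetical sketch (the method name and signature are assumptions; the event format comes from the test):

   // Hypothetical gate run before a node installs a new cache topology.
   static void gateTopologyUpdate(CheckPoint checkPoint, int topologyId, Object localAddress)
         throws InterruptedException, TimeoutException {
      // Block here until the test calls checkPoint.trigger("allow_topology_<topologyId>_on_<address>").
      checkPoint.awaitStrict("allow_topology_" + topologyId + "_on_" + localAddress, 10, TimeUnit.SECONDS);
   }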

      int preJoinTopologyId = cache0.getComponentRegistry().getStateTransferManager().getCacheTopology().getTopologyId();

      // Block any state response commands on cache0
      // So that we can install the spy ClusteringDependentLogic on cache1 before state transfer is applied
      final CheckPoint checkPoint = new CheckPoint();
      ControlledRpcManager blockingRpcManager0 = blockStateResponseCommand(cache0);

      // Start the joiner
      log.tracef("Starting the cache on the joiner");
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);

      final AdvancedCache<Object,Object> cache1 = advancedCache(1);

      // Wait for the write CH to contain the joiner everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return cache0.getRpcManager().getMembers().size() == 2 &&
                  cache1.getRpcManager().getMembers().size() == 2;
         }
      });

      // Every PutKeyValueCommand will be blocked before committing the entry on cache1
      blockEntryCommit(checkPoint, cache1);

      // Wait for cache0 to collect the state to send to cache1 (including our previous value).
      blockingRpcManager0.waitForCommandToBlock();
      // Allow the state to be applied on cache1 (writing the old value for our entry)
      blockingRpcManager0.stopBlocking();

      // Wait for state transfer tx/operation to call commitEntry on cache1
      checkPoint.awaitStrict("pre_commit_entry_" + key + "_from_" + null, 5, SECONDS);

      // Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 will become a backup owner for the retry
      // The put command will be blocked on cache1 just before committing the entry.
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return op.perform(cache0, key);
         }
      });

      // Wait for the user tx/operation to call commitEntry on cache1
      boolean blocked = checkPoint.peek(3, SECONDS, "pre_commit_entry_" + key + "_from_" + address(0)) == null;
      assertTrue(blocked);

      // Allow the command to commit (though it will still be blocked)
      // Trigger the event twice because the command may be retried (if the rebalance finishes first)
      checkPoint.trigger("resume_commit_entry_" + key + "_from_" + address(0), 2);

      // Allow state transfer to commit
      checkPoint.trigger("resume_commit_entry_" + key + "_from_" + null);

      // Wait for both state transfer and the command to commit
      checkPoint.awaitStrict("post_commit_entry_" + key + "_from_" + null, 10, SECONDS);
      checkPoint.awaitStrict("post_commit_entry_" + key + "_from_" + address(0), 10, SECONDS);

      // Wait for the command to finish and check that it didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);
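blockEntryCommit(checkPoint, cache1) wires the CheckPoint into the entry commit path on the joiner. The pre_/resume_/post_commit_entry events used above imply a three-step handshake around each commit; a hypothetical sketch (only the event names come from the test, everything else is assumed):

   // Hypothetical wrapper around committing a single entry. The command origin is part of the event
   // names; for entries written by state transfer the test uses null as the origin.
   static void gatedCommitEntry(CheckPoint checkPoint, Object key, Object origin, Runnable realCommit)
         throws InterruptedException, TimeoutException {
      checkPoint.trigger("pre_commit_entry_" + key + "_from_" + origin);    // announce the commit attempt
      checkPoint.awaitStrict("resume_commit_entry_" + key + "_from_" + origin, 10, TimeUnit.SECONDS);
      realCommit.run();                                                     // perform the actual commit
      checkPoint.trigger("post_commit_entry_" + key + "_from_" + origin);   // report completion
   }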

   public void testPrimaryOwnerLeavingDuringPutIfAbsent() throws Exception {
      doTest(true);
   }

   private void doTest(final boolean conditional) throws Exception {
      CheckPoint checkPoint = new CheckPoint();
      LocalTopologyManager ltm0 = TestingUtil.extractGlobalComponent(manager(0), LocalTopologyManager.class);
      int preJoinTopologyId = ltm0.getCacheTopology(CACHE_NAME).getTopologyId();

      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      addBlockingLocalTopologyManager(manager(0), checkPoint, preJoinTopologyId);

      final AdvancedCache<Object, Object> cache1 = advancedCache(1);
      addBlockingLocalTopologyManager(manager(1), checkPoint, preJoinTopologyId);

      // Add a new member and block the rebalance before the final topology is installed
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);
      addBlockingLocalTopologyManager(manager(2), checkPoint, preJoinTopologyId);

      log.tracef("Starting the cache on the joiner");
      final AdvancedCache<Object,Object> cache2 = advancedCache(2);
      int duringJoinTopologyId = preJoinTopologyId + 1;

      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(2));

      // Wait for the write CH to contain the joiner everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return cache0.getRpcManager().getMembers().size() == 3 &&
                  cache1.getRpcManager().getMembers().size() == 3 &&
                  cache2.getRpcManager().getMembers().size() == 3;
         }
      });

      CacheTopology duringJoinTopology = ltm0.getCacheTopology(CACHE_NAME);
      assertEquals(duringJoinTopologyId, duringJoinTopology.getTopologyId());
      assertNotNull(duringJoinTopology.getPendingCH());
      final MagicKey key = getKeyForCache2(duringJoinTopology.getPendingCH());
      log.tracef("Rebalance started. Found key %s with current owners %s and pending owners %s", key,
            duringJoinTopology.getCurrentCH().locateOwners(key), duringJoinTopology.getPendingCH().locateOwners(key));

      // Every PutKeyValueCommand will be blocked before reaching the distribution interceptor on cache1
      CyclicBarrier beforeCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(beforeCache1Barrier,
            PutKeyValueCommand.class, false);
      cache1.addInterceptorBefore(blockingInterceptor1, NonTxConcurrentDistributionInterceptor.class);

      // Every PutKeyValueCommand will be blocked after returning to the distribution interceptor on cache2
      CyclicBarrier afterCache2Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor2 = new BlockingInterceptor(afterCache2Barrier,
            PutKeyValueCommand.class, true);
      cache2.addInterceptorBefore(blockingInterceptor2, StateTransferInterceptor.class);

      // Put from cache0 with cache0 as primary owner, cache2 will become the primary owner for the retry
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return conditional ? cache0.putIfAbsent(key, "v") : cache0.put(key, "v");
         }
      });

      // Wait for the command to be executed on cache2 and unblock it
      afterCache2Barrier.await(10, TimeUnit.SECONDS);
      afterCache2Barrier.await(10, TimeUnit.SECONDS);

      // Allow the topology update to proceed on all the caches
      int postJoinTopologyId = duringJoinTopologyId + 1;
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(2));

      // Wait for the topology to change everywhere
      TestingUtil.waitForRehashToComplete(cache0, cache1, cache2);

      // Allow the put command to throw an OutdatedTopologyException on cache1

      cache(0).put(k1, "v1");

      final StateTransferManager stm = cache(0).getAdvancedCache().getComponentRegistry().getStateTransferManager();
      final int initialTopologyId = stm.getCacheTopology().getTopologyId();

      final CheckPoint checkPoint = new CheckPoint();
      replaceInvocationHandler(checkPoint, manager(0), StateResponseCommand.class);
      replaceInvocationHandler(checkPoint, manager(1), StateRequestCommand.class);
      replaceInvocationHandler(checkPoint, manager(2), StateRequestCommand.class);

      log.debugf("Killing node %s", address(3));
      cache(3).stop();

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            // Wait for the rebalance cache topology to be installed
            return stm.getCacheTopology().getTopologyId() == initialTopologyId + 2;
         }
      });

      // Allow cache 0 to request transactions from caches 1 and 2 (in any order)
      checkPoint.trigger("OUT_GET_TRANSACTIONS_" + address(1));
      checkPoint.trigger("OUT_GET_TRANSACTIONS_" + address(2));
      checkPoint.awaitStrict("IN_GET_TRANSACTIONS_" + address(1), 10, SECONDS);
      checkPoint.awaitStrict("IN_GET_TRANSACTIONS_" + address(2), 10, SECONDS);

      // See which cache receives the START_STATE_TRANSFER command first. We'll kill the other.
      String event = checkPoint.peek(5, TimeUnit.SECONDS, "IN_START_STATE_TRANSFER_" + address(1),
            "IN_START_STATE_TRANSFER_" + address(2));
      int liveNode = event.endsWith(address(1).toString()) ? 1 : 2;
      int nodeToKill = liveNode == 1 ? 2 : 1;
      List<Address> keyOwners = cache(0).getAdvancedCache().getDistributionManager().locate(k1);
      log.debugf("Killing node %s. Key %s is located on %s", address(nodeToKill), k1, keyOwners);
      log.debugf("Data on node %s: %s", address(1), cache(1).keySet());
      log.debugf("Data on node %s: %s", address(2), cache(2).keySet());

      // Now that we know which node to kill, allow the START_STATE_TRANSFER command to proceed.
      // The corresponding StateResponseCommand will be blocked on cache 0
      checkPoint.await("IN_START_STATE_TRANSFER_" + address(liveNode), 1, SECONDS);
      checkPoint.trigger("OUT_START_STATE_TRANSFER_" + address(liveNode));

      // Kill cache(nodeToKill) to force a topology update.
      // The topology update will remove the transfers from cache(nodeToKill).
      cache(nodeToKill).stop();

      // Now allow cache 0 to process the state from cache(liveNode)
      checkPoint.awaitStrict("IN_RESPONSE_" + address(liveNode), 10, SECONDS);
      checkPoint.trigger("OUT_RESPONSE_" + address(liveNode));

      log.debugf("Received segments?");
      Thread.sleep(1000);

      // Wait for cache 0 to request the transactions for the failed segments from cache 1
      checkPoint.awaitStrict("IN_GET_TRANSACTIONS_" + address(liveNode), 10, SECONDS);
      checkPoint.trigger("OUT_GET_TRANSACTIONS_" + address(liveNode));

      // ISPN-3120: Now cache 0 should think it finished receiving state. Allow all the commands to proceed.
      checkPoint.awaitStrict("IN_START_STATE_TRANSFER_" + address(liveNode), 10, SECONDS);
      checkPoint.trigger("OUT_START_STATE_TRANSFER_" + address(liveNode));

      checkPoint.awaitStrict("IN_RESPONSE_" + address(liveNode), 10, SECONDS);
      checkPoint.trigger("OUT_RESPONSE_" + address(liveNode));

      TestingUtil.waitForRehashToComplete(cache(0), cache(liveNode));

      log.debugf("Final checkpoint status: %s", checkPoint);
      DataContainer dataContainer = TestingUtil.extractComponent(cache(0), DataContainer.class);
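replaceInvocationHandler(checkPoint, manager, commandClass) intercepts state transfer commands as they are delivered on a node. The paired IN_*/OUT_* events above suggest the replacement handler announces every incoming command and then waits for the test's permission before the real handler runs. A hypothetical sketch (how the type token and address suffix are derived is an assumption; the event format comes from the test):

   // Hypothetical interception of an incoming state transfer command. "type" would be a token such as
   // GET_TRANSACTIONS, START_STATE_TRANSFER or RESPONSE, and "nodeSuffix" the address the test keys on.
   static void gateStateTransferCommand(CheckPoint checkPoint, String type, Object nodeSuffix,
         Runnable realHandler) throws InterruptedException, TimeoutException {
      checkPoint.trigger("IN_" + type + "_" + nodeSuffix);                            // command arrived
      checkPoint.awaitStrict("OUT_" + type + "_" + nodeSuffix, 10, TimeUnit.SECONDS); // wait for the test
      realHandler.run();                                                              // process it
   }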

         }));

         cyclicBarrier.await(10, SECONDS);

         // Block applying the incoming state on nonOwnerCache
         CheckPoint checkPoint = new CheckPoint();
         log.trace("Adding proxy to state transfer");
         waitUntilStateBeingTransferred(nonOwnerCache, checkPoint);

         backupOwnerCache.getCacheManager().stop();

         // Wait for non owner to just about get state
         checkPoint.awaitStrict("pre_state_apply_invoked_for_" + nonOwnerCache, 10, SECONDS);

         // let prepare complete and thus commit command invalidating on nonOwner
         cyclicBarrier.await(10, SECONDS);

         assertEquals(op.getReturnValue(), future.get(10, SECONDS));

         // let state transfer go
         checkPoint.trigger("pre_state_apply_release_for_" + nonOwnerCache);

         TestingUtil.waitForRehashToComplete(primaryOwnerCache, nonOwnerCache);

         switch (op) {
            case REMOVE:
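waitUntilStateBeingTransferred(nonOwnerCache, checkPoint) instruments the point where the non-owner applies incoming state. The two events used above form a simple gate; a hypothetical sketch (the method name and signature are assumptions; the event names come from the test):

   // Hypothetical hook run when the non-owner is about to apply a state response.
   static void gateStateApply(CheckPoint checkPoint, Object cache)
         throws InterruptedException, TimeoutException {
      checkPoint.trigger("pre_state_apply_invoked_for_" + cache);   // the state has arrived
      // Park until the test fires "pre_state_apply_release_for_<cache>".
      checkPoint.awaitStrict("pre_state_apply_release_for_" + cache, 10, TimeUnit.SECONDS);
   }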

      }

      int preJoinTopologyId = primaryOwnerCache.getComponentRegistry().getStateTransferManager().getCacheTopology().getTopologyId();

      // Block any state response commands on cache0
      CheckPoint checkPoint = new CheckPoint();
      ControlledRpcManager blockingRpcManager0 = blockStateResponseCommand(primaryOwnerCache);

      // Block the rebalance confirmation on cache0
      blockRebalanceConfirmation(primaryOwnerCache.getCacheManager(), checkPoint);

      assertEquals(primaryOwnerCache.getCacheManager().getCoordinator(), primaryOwnerCache.getCacheManager().getAddress());

      // Remove the leaver
      log.trace("Stopping the cache");
      backupOwnerCache.getCacheManager().stop();

      int rebalanceTopologyId = preJoinTopologyId + 2;

      // Wait for the leaver to be removed from the members list everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return primaryOwnerCache.getRpcManager().getMembers().size() == 2 &&
                  nonOwnerCache.getRpcManager().getMembers().size() == 2;
         }
      });

      assertEquals(primaryOwnerCache.getCacheManager().getCoordinator(), primaryOwnerCache.getCacheManager().getAddress());

      // Wait for cache0 to collect the state to send to cache1 (including our previous value).
      blockingRpcManager0.waitForCommandToBlock();

      // Every PutKeyValueCommand will be blocked before committing the entry on cache1
      CyclicBarrier beforeCommitCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(beforeCommitCache1Barrier,
                                                                         op.getCommandClass(), true);
      nonOwnerCache.addInterceptorAfter(blockingInterceptor1, EntryWrappingInterceptor.class);

      // Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 will become a backup owner for the retry
      // The put command will be blocked on cache1 just before committing the entry.
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return op.perform(primaryOwnerCache, key);
         }
      });

      // Wait for the entry to be wrapped on cache1
      beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

      // Remove the interceptor so we don't mess up any other state transfer puts
      removeAllBlockingInterceptorsFromCache(nonOwnerCache);

      // Allow the state to be applied on cache1 (writing the old value for our entry)
      blockingRpcManager0.stopBlocking();

      // Wait for the nonOwnerCache to finish applying the state, but don't allow the rebalance confirmation to be processed.
      // (It would change the topology and it would trigger a retry for the command.)
      checkPoint.awaitStrict("pre_rebalance_confirmation_" + rebalanceTopologyId + "_from_" +
                                   primaryOwnerCache.getCacheManager().getAddress(), 10, SECONDS);

      // Now allow the command to commit on cache1
      beforeCommitCache1Barrier.await(10, TimeUnit.SECONDS);

      // Wait for the command to finish and check that it didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);

      // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + primaryOwnerCache.getCacheManager().getAddress());
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + nonOwnerCache.getCacheManager().getAddress());
      TestingUtil.waitForRehashToComplete(primaryOwnerCache, nonOwnerCache);

      switch (op) {
         case REMOVE:
         case REMOVE_EXACT:

      int preJoinTopologyId = cache0.getComponentRegistry().getStateTransferManager().getCacheTopology().getTopologyId();

      // Block any state response commands on cache0
      // So that we can install the spy ClusteringDependentLogic on cache1 before state transfer is applied
      final CheckPoint checkPoint = new CheckPoint();
      ControlledRpcManager blockingRpcManager0 = blockStateResponseCommand(cache0);

      // Block the rebalance confirmation on cache0 (to avoid the retrying of commands)
      blockRebalanceConfirmation(manager(0), checkPoint);

      // Start the joiner
      log.tracef("Starting the cache on the joiner");
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);

      final AdvancedCache<Object,Object> cache1 = advancedCache(1);

      // Wait for the write CH to contain the joiner everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return cache0.getRpcManager().getMembers().size() == 2 &&
                  cache1.getRpcManager().getMembers().size() == 2;
         }
      });

      // Every PutKeyValueCommand will be blocked before committing the entry on cache1
      blockEntryCommit(checkPoint, cache1);

      // Wait for cache0 to collect the state to send to cache1 (including our previous value).
      blockingRpcManager0.waitForCommandToBlock();
      // Allow the state to be applied on cache1 (writing the old value for our entry)
      blockingRpcManager0.stopBlocking();

      // Wait for state transfer tx/operation to call commitEntry on cache1 and block
      checkPoint.awaitStrict("pre_commit_entry_" + key + "_from_" + null, 5, SECONDS);

      // Put/Replace/Remove from cache0 with cache0 as primary owner, cache1 as backup owner
      // The put command will be blocked on cache1 just before committing the entry.
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return op.perform(cache0, key);
         }
      });

      // Check that the user write is blocked by the state transfer write
      boolean blocked = checkPoint.peek(3, SECONDS, "pre_commit_entry_" + key + "_from_" + address(0)) == null;
      assertTrue(blocked);

      // Allow state transfer to commit
      checkPoint.trigger("resume_commit_entry_" + key + "_from_" + null);

      // Check that the user operation can now commit the entry
      checkPoint.awaitStrict("pre_commit_entry_" + key + "_from_" + address(0), 5, SECONDS);

      // Allow the user put to commit
      checkPoint.trigger("resume_commit_entry_" + key + "_from_" + address(0));

      // Wait for both state transfer and the command to commit
      checkPoint.awaitStrict("post_commit_entry_" + key + "_from_" + null, 10, SECONDS);
      checkPoint.awaitStrict("post_commit_entry_" + key + "_from_" + address(0), 10, SECONDS);

      // Wait for the command to finish and check that it didn't fail
      Object result = future.get(10, TimeUnit.SECONDS);
      assertEquals(op.getReturnValue(), result);
      log.tracef("%s operation is done", op);

      // Allow the rebalance confirmation to proceed and wait for the topology to change everywhere
      int rebalanceTopologyId = preJoinTopologyId + 1;
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(0));
      checkPoint.trigger("resume_rebalance_confirmation_" + rebalanceTopologyId + "_from_" + address(1));
      TestingUtil.waitForRehashToComplete(cache0, cache1);

      // Check the value on all the nodes
      assertEquals(op.getValue(), cache0.get(key));
      assertEquals(op.getValue(), cache1.get(key));

   }

   private void doTest(final boolean conditional) throws Exception {
      final String key = "testkey";

      CheckPoint checkPoint = new CheckPoint();
      LocalTopologyManager ltm0 = TestingUtil.extractGlobalComponent(manager(0), LocalTopologyManager.class);
      int preJoinTopologyId = ltm0.getCacheTopology(CACHE_NAME).getTopologyId();

      final AdvancedCache<Object, Object> cache0 = advancedCache(0);
      addBlockingLocalTopologyManager(manager(0), checkPoint, preJoinTopologyId);

      final AdvancedCache<Object, Object> cache1 = advancedCache(1);
      addBlockingLocalTopologyManager(manager(1), checkPoint, preJoinTopologyId);

      // Add a new member and block the rebalance before the final topology is installed
      ConfigurationBuilder c = getConfigurationBuilder();
      c.clustering().stateTransfer().awaitInitialTransfer(false);
      addClusterEnabledCacheManager(c);
      addBlockingLocalTopologyManager(manager(2), checkPoint, preJoinTopologyId);

      log.tracef("Starting the cache on the joiner");
      final AdvancedCache<Object,Object> cache2 = advancedCache(2);
      int duringJoinTopologyId = preJoinTopologyId + 1;

      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + duringJoinTopologyId + "_on_" + address(2));

      // Wait for the write CH to contain the joiner everywhere
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return cache0.getRpcManager().getMembers().size() == 3 &&
                  cache1.getRpcManager().getMembers().size() == 3 &&
                  cache2.getRpcManager().getMembers().size() == 3;
         }
      });

      CacheTopology duringJoinTopology = ltm0.getCacheTopology(CACHE_NAME);
      assertEquals(duringJoinTopologyId, duringJoinTopology.getTopologyId());
      assertNotNull(duringJoinTopology.getPendingCH());
      log.tracef("Rebalance started. Found key %s with current owners %s and pending owners %s", key,
            duringJoinTopology.getCurrentCH().locateOwners(key), duringJoinTopology.getPendingCH().locateOwners(key));

      // Every PutKeyValueCommand will be blocked before reaching the distribution interceptor on cache1
      CyclicBarrier beforeCache1Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor1 = new BlockingInterceptor(beforeCache1Barrier,
            PutKeyValueCommand.class, false);
      cache1.addInterceptorBefore(blockingInterceptor1, NonTxDistributionInterceptor.class);

      // Every PutKeyValueCommand will be blocked after returning to the distribution interceptor on cache2
      CyclicBarrier afterCache2Barrier = new CyclicBarrier(2);
      BlockingInterceptor blockingInterceptor2 = new BlockingInterceptor(afterCache2Barrier,
            PutKeyValueCommand.class, true);
      cache2.addInterceptorBefore(blockingInterceptor2, StateTransferInterceptor.class);

      // Put from cache0 with cache0 as primary owner, cache2 will become the primary owner for the retry
      Future<Object> future = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            return conditional ? cache0.putIfAbsent(key, "v") : cache0.put(key, "v");
         }
      });

      // Wait for the command to be executed on cache2 and unblock it
      afterCache2Barrier.await(10, TimeUnit.SECONDS);
      afterCache2Barrier.await(10, TimeUnit.SECONDS);

      // Allow the topology update to proceed on all the caches
      int postJoinTopologyId = duringJoinTopologyId + 1;
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(0));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(1));
      checkPoint.trigger("allow_topology_" + postJoinTopologyId + "_on_" + address(2));

      // Wait for the topology to change everywhere
      TestingUtil.waitForRehashToComplete(cache0, cache1, cache2);

      // Allow the put command to throw an OutdatedTopologyException on cache1
