Package org.infinispan.test.concurrent

Examples of org.infinispan.test.concurrent.StateSequencer
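
StateSequencer coordinates the interleaving of concurrent test code: states are named strings, grouped into "logical threads" that fix their relative order, with order(...) adding cross-thread constraints. A participant calls advance(state) to block until every state ordered before it has completed, or enter(state)/exit(state) to do work while holding a state. The examples below exercise this against Infinispan's state-transfer and transaction machinery. As orientation, a minimal sketch built only from the calls used on this page (state names are illustrative):

      StateSequencer sequencer = new StateSequencer();
      // Each logical thread lists its states in the order they must occur.
      sequencer.logicalThread("writer", "writer:before_put", "writer:after_put");
      sequencer.logicalThread("reader", "reader:read");
      // Cross-thread constraint: the read happens between the two writer states.
      sequencer.order("writer:before_put", "reader:read", "writer:after_put");

      // Writer thread:
      //    sequencer.advance("writer:before_put");  // proceeds immediately
      //    ... perform the put ...
      //    sequencer.advance("writer:after_put");   // blocks until "reader:read" is done
      // Reader thread:
      //    sequencer.enter("reader:read");          // blocks until "writer:before_put" is done
      //    ... assert on the intermediate state ...
      //    sequencer.exit("reader:read");
      sequencer.stop();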


      Object k7 = new MagicKey("k8", cache(c1), cache(a0));

      final Object[] allKeys = new Object[] {k0, k1, k2, k3, k4, k5, k6, k7};
      for (Object k : allKeys) cache(a0).put(k, k);

      StateSequencer ss = new StateSequencer();
      ss.logicalThread("main", "main:st_in_progress", "main:2nd_node_left", "main:cluster_unavailable");

      final StateTransferManager stm0 = advancedCache(a0).getComponentRegistry().getStateTransferManager();
      final int initialTopologyId = stm0.getCacheTopology().getTopologyId();
      StateSequencerUtil.advanceOnInboundRpc(ss, manager(a1), new CommandMatcher() {
         @Override
         public boolean accept(ReplicableCommand command) {
//            System.out.println("command = " + command + " received on " + address(cache(a1)));
            if (!(command instanceof StateResponseCommand))
               return false;
            StateResponseCommand responseCommand = (StateResponseCommand) command;
            return initialTopologyId < responseCommand.getCommandId();
         }
      }).before("main:st_in_progress", "main:cluster_unavailable");

      // Prepare for rebalance. Manager a1 will request state from c0 for segment 2
      cchf.setMembersToUse(advancedCache(a0).getRpcManager().getTransport().getMembers());
      cchf.setOwnerIndexes(new int[]{a0, a1}, new int[]{a1, c0},
            new int[]{c0, a1}, new int[]{c0, a0});

      Address missing = address(c1);
      log.tracef("Before killing node %s", missing);
      crashCacheManagers(manager(c1));
      installNewView(advancedCache(a0).getRpcManager().getTransport().getMembers(), missing, manager(a0), manager(a1), manager(c0));

      ss.enter("main:2nd_node_left");

      missing = address(c0);
      log.tracef("Killing 2nd node %s", missing);
      crashCacheManagers(manager(c0));
      installNewView(advancedCache(a0).getRpcManager().getTransport().getMembers(), missing, manager(a0), manager(a1));

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            PartitionHandlingManager phm0 = TestingUtil.extractComponent(cache(a0), PartitionHandlingManager.class);
            return phm0.getState() == PartitionHandlingManager.PartitionState.UNAVAILABLE;
         }
      });
      ss.exit("main:2nd_node_left");

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            log.trace("Testing condition");
      // ... (snippet truncated)
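
The anonymous CommandMatcher above filters inbound commands by hand, including a topology-id check that skips the initial state transfer. When no per-command field check is needed, the matcher builder used in the later examples expresses the same kind of advance more compactly. A sketch using only builder calls shown elsewhere on this page (note it drops the topology-id filter the original relies on):

      advanceOnInboundRpc(ss, manager(a1),
            matchCommand(StateResponseCommand.class).build())
            .before("main:st_in_progress", "main:cluster_unavailable");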


   public void testAsync() throws Throwable {
      doTest(CacheMode.DIST_ASYNC);
   }

   private void doTest(CacheMode cacheMode) throws Throwable {
      final StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("st", "st:block_get_transactions", "st:resume_get_transactions", "st:block_ch_update", "st:resume_ch_update");
      sequencer.logicalThread("tx", "tx:before_lock", "tx:block_remote_lock", "tx:resume_remote_lock", "tx:after_commit");

      // The lock will be acquired after rebalance has started, but before cache0 starts sending the transaction data to cache1
      sequencer.order("st:block_get_transactions", "tx:before_lock", "tx:block_remote_lock", "st:resume_get_transactions");
      // The tx will be committed (1PC) after cache1 has received all the state, but before the topology is updated
      sequencer.order("st:block_ch_update", "tx:resume_remote_lock", "tx:after_commit", "st:resume_ch_update");

      ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      cfg.clustering().cacheMode(cacheMode)
            .stateTransfer().awaitInitialTransfer(false)
            .transaction().lockingMode(LockingMode.PESSIMISTIC);
      manager(0).defineConfiguration(CACHE_NAME, cfg.build());
      manager(1).defineConfiguration(CACHE_NAME, cfg.build());

      AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
      TransactionManager tm0 = cache0.getTransactionManager();
      StateTransferManager stm0 = TestingUtil.extractComponent(cache0, StateTransferManager.class);

      int initialTopologyId = stm0.getCacheTopology().getTopologyId();
      int rebalanceTopologyId = initialTopologyId + 1;
      final int finalTopologyId = rebalanceTopologyId + 1;

      // Block state request commands on cache0 until the lock command has been sent to cache1
      advanceOnComponentMethod(sequencer, cache0, StateProvider.class,
            matchMethodCall("getTransactionsForSegments").build())
            .before("st:block_get_transactions", "st:resume_get_transactions");
      // Block the topology update on cache0 until the tx has finished
      advanceOnGlobalComponentMethod(sequencer, manager(0), LocalTopologyManager.class,
            matchMethodCall("handleTopologyUpdate").withMatcher(1, new CacheTopologyMatcher(finalTopologyId)).build())
            .before("st:block_ch_update", "st:resume_ch_update");

      // Start cache 1, but the state request will be blocked on cache 0
      AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);

      // Block the remote lock command on cache 1
      advanceOnInboundRpc(sequencer, manager(1),
            matchCommand(LockControlCommand.class).withCache(CACHE_NAME).build())
            .before("tx:block_remote_lock", "tx:resume_remote_lock");


      // Wait for the rebalance to start
      sequencer.advance("tx:before_lock");
      assertEquals(rebalanceTopologyId, stm0.getCacheTopology().getTopologyId());

      // Start a transaction on cache 0
      MagicKey key = new MagicKey("testkey", cache0);
      tm0.begin();
      cache0.lock(key);
      tm0.commit();

      // Let the rebalance finish
      sequencer.advance("tx:after_commit");

      TestingUtil.waitForRehashToComplete(caches(CACHE_NAME));
      assertEquals(finalTopologyId, stm0.getCacheTopology().getTopologyId());

      // Check for stale locks
      final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
      final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0;
         }
      });

      sequencer.stop();
   }
      // ... (snippet truncated)
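
Condition declares a single isSatisfied() method, so on a Java 8+ harness the stale-lock poll above can be written as a lambda (a sketch, assuming Condition is usable as a functional interface):

      eventually(() -> tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0);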

      // Check that locks are released on B
      // Start another transaction on A: put(k, v2) with the same key
      // Check that the new transaction writes successfully
      // Allow the commit to proceed on C
      // Check that k=v2 everywhere
      StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("tx1", "tx1:begin", "tx1:block_commit_on_backup", "tx1:after_rollback_on_primary",
            "tx1:after_rollback_on_backup", "tx1:resume_commit_on_backup", "tx1:after_commit_on_backup", "tx1:check");
      sequencer.logicalThread("tx2", "tx2:begin", "tx2:end");

      sequencer.order("tx1:after_rollback_on_backup", "tx2:begin", "tx2:end", "tx1:resume_commit_on_backup");

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(CommitCommand.class).matchCount(0).build())
            .before("tx1:block_commit_on_backup", "tx1:resume_commit_on_backup").after("tx1:after_commit_on_backup");

      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("tx1:after_rollback_on_primary");

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("tx1:after_rollback_on_backup");

      assertEquals(Arrays.asList(address(1), address(2)), advancedCache(0).getDistributionManager().locate(TEST_KEY));
      sequencer.advance("tx1:begin");

      tm(0).begin();
      cache(0).put(TEST_KEY, TX1_VALUE);
      tm(0).commit();

      sequencer.advance("tx2:begin");
      LockManager lockManager1 = TestingUtil.extractLockManager(cache(1));
      assertFalse(lockManager1.isLocked(TEST_KEY));

      tm(0).begin();
      cache(0).put(TEST_KEY, TX2_VALUE);
      tm(0).commit();

      checkValue();
      sequencer.advance("tx2:end");

      sequencer.advance("tx1:check");
      checkValue();
   }
      // ... (snippet truncated)
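
The handle returned by advanceOnInterceptor brackets the intercepted command: before(enterState, resumeState) parks the command between those two states, and after(doneState) advances once the command has completed. This reading follows from how the states are ordered above; schematically, with the same commit advance annotated:

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(CommitCommand.class).matchCount(0).build())
            .before("tx1:block_commit_on_backup",    // entered when the commit arrives
                  "tx1:resume_commit_on_backup")     // the commit is held until here
            .after("tx1:after_commit_on_backup");    // entered once the commit returns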

      // Check that k is still locked on B
      // Allow the commit to proceed on C
      // Allow the rollback to proceed on C
      // Check that k=v1 everywhere
      // Check that locks are released on B
      final StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("tx1", "tx1:begin", "tx1:block_commit_on_backup", "tx1:after_rollback_on_primary",
            "tx1:block_rollback_on_backup", "tx1:resume_commit_on_backup", "tx1:after_commit_on_backup",
            "tx1:resume_rollback_on_backup", "tx1:after_rollback_on_backup", "tx1:check");

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(CommitCommand.class).matchCount(0).build())
            .before("tx1:block_commit_on_backup", "tx1:resume_commit_on_backup").after("tx1:after_commit_on_backup");

      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("tx1:after_rollback_on_primary");

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .before("tx1:block_rollback_on_backup").after("tx1:after_rollback_on_backup");

      assertEquals(Arrays.asList(address(1), address(2)), advancedCache(0).getDistributionManager().locate(TEST_KEY));
      Future<Object> lockCheckFuture = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            sequencer.enter("tx1:resume_rollback_on_backup");
            try {
               assertTrue(TestingUtil.extractLockManager(cache(1)).isLocked(TEST_KEY));
            } finally {
               sequencer.exit("tx1:resume_rollback_on_backup");
            }
            return null;
         }
      });


      sequencer.advance("tx1:begin");

      tm(0).begin();
      cache(0).put(TEST_KEY, TX1_VALUE);
      tm(0).commit();

      sequencer.advance("tx1:check");
      assertFalse(TestingUtil.extractLockManager(cache(1)).isLocked(TEST_KEY));
      lockCheckFuture.get(10, TimeUnit.SECONDS);
   }
      // ... (snippet truncated)
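
fork(...) takes a Callable and returns a Future, so on Java 8+ the forked lock check above can likewise be a lambda (a sketch; the trailing return null selects the Callable overload):

      Future<Object> lockCheckFuture = fork(() -> {
         sequencer.enter("tx1:resume_rollback_on_backup");
         try {
            assertTrue(TestingUtil.extractLockManager(cache(1)).isLocked(TEST_KEY));
         } finally {
            sequencer.exit("tx1:resume_rollback_on_backup");
         }
         return null;
      });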

   private static final String VALUE = "value";

   ControlledConsistentHashFactory consistentHashFactory = new ControlledConsistentHashFactory(0, 1, 2);

   public void testReplay() throws Exception {
      final StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("tx", "tx:before_prepare_replay", "tx:resume_prepare_replay");
      sequencer.logicalThread("sim", "sim:before_extra_commit", "sim:during_extra_commit");
      sequencer.order("tx:before_prepare_replay", "sim:before_extra_commit", "sim:during_extra_commit", "tx:resume_prepare_replay");

      final Object key = "key";
      assertEquals(Arrays.asList(address(0), address(1), address(2)), advancedCache(0).getDistributionManager().locate(key));
      Cache<Object, Object> primaryOwnerCache = cache(0);
      final Cache<Object, Object> newBackupOwnerCache = cache(3);
      final CountingInterceptor newBackupCounter = CountingInterceptor.inject(newBackupOwnerCache);
      final CountingInterceptor primaryCounter = CountingInterceptor.inject(primaryOwnerCache);
      final CountingInterceptor oldBackup2Counter = CountingInterceptor.inject(cache(2));

      advanceOnInterceptor(sequencer, newBackupOwnerCache, CallInterceptor.class,
            matchCommand(PrepareCommand.class).matchCount(0).build())
            .before("tx:before_prepare_replay", "tx:resume_prepare_replay");
      advanceOnInterceptor(sequencer, newBackupOwnerCache, TransactionSynchronizerInterceptor.class,
            matchCommand(CommitCommand.class).matchCount(1).build())
            .before("sim:during_extra_commit");

      final DummyTransactionManager transactionManager = (DummyTransactionManager) tm(0);
      transactionManager.begin();
      primaryOwnerCache.put(key, VALUE);

      final DummyTransaction transaction = transactionManager.getTransaction();
      TransactionTable transactionTable0 = TestingUtil.getTransactionTable(primaryOwnerCache);
      final GlobalTransaction gtx = transactionTable0.getLocalTransaction(transaction).getGlobalTransaction();
      transaction.runPrepare();
      assertEquals("Wrong transaction status before killing backup owner.",
            Status.STATUS_PREPARED, transaction.getStatus());

      // Now kill cache(1). The transaction is prepared on cache(1), so its state should be transferred to cache(2)
      killMember(1);

      final int currentTopologyId = TestingUtil.extractComponentRegistry(primaryOwnerCache).getStateTransferManager().getCacheTopology().getTopologyId();
      Future<Object> secondCommitFuture = fork(new Callable<Object>() {
         @Override
         public Object call() throws Exception {
            // Wait for the commit command to block replaying the prepare on the new backup
            sequencer.advance("sim:before_extra_commit");
            // And try to run another commit command
            CommitCommand command = new CommitCommand(newBackupOwnerCache.getName(), gtx);
            command.setTopologyId(currentTopologyId);
            CommandsFactory cf = TestingUtil.extractCommandsFactory(newBackupOwnerCache);
            cf.initializeReplicableCommand(command, true);
      // ... (snippet truncated)
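
matchCount(n) recurs in these snippets; judging purely by its use here, it makes the matcher fire on the (n+1)-th matching command only, which is how the test separates the first commit from the duplicate it forges (a hedged reading):

      CommandMatcher firstCommit  = matchCommand(CommitCommand.class).matchCount(0).build(); // first CommitCommand seen
      CommandMatcher secondCommit = matchCommand(CommitCommand.class).matchCount(1).build(); // second (the forged duplicate)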

      // Start a tx on A: put(k, v1), owners(k) = [B (primary) and C (backup)]
      // Block the prepare on B and C so that it times out
      // Wait for the rollback command to be executed on B and C
      // Unblock the prepare on B and C
      // Check that there are no locked keys or remote transactions on B and C
      StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("main", "main:start", "main:check");
      sequencer.logicalThread("primary", "primary:block_prepare", "primary:after_rollback", "primary:resume_prepare",
            "primary:after_prepare");
      sequencer.logicalThread("backup", "backup:block_prepare", "backup:after_rollback", "backup:resume_prepare",
            "backup:after_prepare");

      sequencer.order("main:start", "primary:block_prepare", "primary:after_prepare", "main:check");
      sequencer.order("main:start", "backup:block_prepare", "backup:after_prepare", "main:check");

      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(PrepareCommand.class).matchCount(0).build())
            .before("primary:block_prepare", "primary:resume_prepare").after("primary:after_prepare");

      advanceOnInterceptor(sequencer, cache(1), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("primary:after_rollback");

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(PrepareCommand.class).matchCount(0).build())
            .before("backup:block_prepare", "backup:resume_prepare").after("backup:after_prepare");

      advanceOnInterceptor(sequencer, cache(2), StateTransferInterceptor.class,
            matchCommand(RollbackCommand.class).build())
            .after("backup:after_rollback");


      assertEquals(Arrays.asList(address(1), address(2)), advancedCache(0).getDistributionManager().locate(TEST_KEY));
      sequencer.advance("main:start");

      tm(0).begin();
      cache(0).put(TEST_KEY, TX1_VALUE);
      try {
         tm(0).commit();
         fail("Exception expected during commit");
      } catch (Exception e) {
         // expected
      }

      tm(0).begin();
      cache(0).put(TEST_KEY, TX2_VALUE);
      GlobalTransaction gtx1 = transactionTable(0).getLocalTransaction(tm(0).getTransaction()).getGlobalTransaction();
      tm(0).commit();

      // Wait for the 1st tx to be removed from the completed txs table
      Thread.sleep(COMPLETED_TX_TIMEOUT + 1000);

      assertTrue(transactionTable(1).isTransactionCompleted(gtx1));
      assertTrue(transactionTable(2).isTransactionCompleted(gtx1));

      sequencer.advance("main:check");

      LockManager lockManager1 = TestingUtil.extractLockManager(cache(1));
      assertFalse(lockManager1.isLocked(TEST_KEY));

      assertFalse(transactionTable(1).containRemoteTx(gtx1));
      // ... (snippet truncated)
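
The fixed Thread.sleep(COMPLETED_TX_TIMEOUT + 1000) above makes the test timing-sensitive. If the assertions are what matters, the eventually(...) idiom used elsewhere on this page can poll for the same condition instead (a sketch; only equivalent if the test does not depend on the full timeout actually elapsing):

      eventually(() -> transactionTable(1).isTransactionCompleted(gtx1)
            && transactionTable(2).isTransactionCompleted(gtx1));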

      Object k7 = new MagicKey("k8", cache(c1), cache(a0));

      final Object[] allKeys = new Object[] {k0, k1, k2, k3, k4, k5, k6, k7};
      for (Object k : allKeys) cache(a0).put(k, k);

      StateSequencer ss = new StateSequencer();
      ss.logicalThread("main", "main:st_in_progress", "main:2nd_node_left", "main:cluster_unavailable");

      final StateTransferManager stm0 = advancedCache(a0).getComponentRegistry().getStateTransferManager();
      final int initialTopologyId = stm0.getCacheTopology().getTopologyId();
      StateSequencerUtil.advanceOnInboundRpc(ss, manager(a1), new CommandMatcher() {
         @Override
         public boolean accept(ReplicableCommand command) {
            if (!(command instanceof StateResponseCommand))
               return false;
            StateResponseCommand responseCommand = (StateResponseCommand) command;
            return initialTopologyId < responseCommand.getCommandId();
         }
      }).before("main:st_in_progress", "main:cluster_unavailable");

      // Prepare for rebalance. Manager a1 will request state from c0 for segment 2
      cchf.setMembersToUse(advancedCache(a0).getRpcManager().getTransport().getMembers());
      cchf.setOwnerIndexes(new int[]{a0, a1}, new int[]{a1, c0},
            new int[]{c0, a1}, new int[]{c0, a0});

      Address missing = address(c1);
      log.tracef("Before killing node %s", missing);
      crashCacheManagers(manager(c1));
      installNewView(advancedCache(a0).getRpcManager().getTransport().getMembers(), missing, manager(a0), manager(a1), manager(c0));

      ss.enter("main:2nd_node_left");

      missing = address(c0);
      log.tracef("Killing 2nd node %s", missing);
      crashCacheManagers(manager(c0));
      installNewView(advancedCache(a0).getRpcManager().getTransport().getMembers(), missing, manager(a0), manager(a1));

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            PartitionHandlingManager phm0 = TestingUtil.extractComponent(cache(a0), PartitionHandlingManager.class);
            return phm0.getAvailabilityMode() == expectedAvailabilityMode;
         }
      });
      ss.exit("main:2nd_node_left");

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            log.trace("Testing condition");
      // ... (snippet truncated)

   public void testAsync() throws Throwable {
      doTest(CacheMode.DIST_ASYNC);
   }

   private void doTest(CacheMode cacheMode) throws Throwable {
      final StateSequencer sequencer = new StateSequencer();
      sequencer.logicalThread("st", "st:block_get_transactions", "st:resume_get_transactions", "st:block_ch_update", "st:resume_ch_update");
      sequencer.logicalThread("tx", "tx:before_lock", "tx:block_remote_lock", "tx:resume_remote_lock", "tx:after_commit");

      // The lock will be acquired after rebalance has started, but before cache0 starts sending the transaction data to cache1
      sequencer.order("st:block_get_transactions", "tx:before_lock", "tx:block_remote_lock", "st:resume_get_transactions");
      // The tx will be committed (1PC) after cache1 has received all the state, but before the topology is updated
      sequencer.order("st:block_ch_update", "tx:resume_remote_lock", "tx:after_commit", "st:resume_ch_update");

      ConfigurationBuilder cfg = TestCacheManagerFactory.getDefaultCacheConfiguration(true);
      cfg.clustering().cacheMode(cacheMode)
            .stateTransfer().awaitInitialTransfer(false)
            .transaction().lockingMode(LockingMode.PESSIMISTIC);
      manager(0).defineConfiguration(CACHE_NAME, cfg.build());
      manager(1).defineConfiguration(CACHE_NAME, cfg.build());

      AdvancedCache<Object, Object> cache0 = advancedCache(0, CACHE_NAME);
      TransactionManager tm0 = cache0.getTransactionManager();
      StateTransferManager stm0 = TestingUtil.extractComponent(cache0, StateTransferManager.class);

      int initialTopologyId = stm0.getCacheTopology().getTopologyId();
      int rebalanceTopologyId = initialTopologyId + 1;
      final int finalTopologyId = rebalanceTopologyId + 1;

      // Block state request commands on cache0 until the lock command has been sent to cache1
      advanceOnComponentMethod(sequencer, cache0, StateProvider.class,
            matchMethodCall("getTransactionsForSegments").build())
            .before("st:block_get_transactions", "st:resume_get_transactions");
      // Block the topology update on cache0 until the tx has finished
      advanceOnGlobalComponentMethod(sequencer, manager(0), LocalTopologyManager.class,
            matchMethodCall("handleConsistentHashUpdate").withMatcher(1, new CacheTopologyMatcher(finalTopologyId)).build())
            .before("st:block_ch_update", "st:resume_ch_update");

      // Start cache 1, but the state request will be blocked on cache 0
      AdvancedCache<Object, Object> cache1 = advancedCache(1, CACHE_NAME);

      // Block the remote lock command on cache 1
      advanceOnInboundRpc(sequencer, manager(1),
            matchCommand(LockControlCommand.class).withCache(CACHE_NAME).build())
            .before("tx:block_remote_lock", "tx:resume_remote_lock");


      // Wait for the rebalance to start
      sequencer.advance("tx:before_lock");
      assertEquals(rebalanceTopologyId, stm0.getCacheTopology().getTopologyId());

      // Start a transaction on cache 0
      MagicKey key = new MagicKey("testkey", cache0);
      tm0.begin();
      cache0.lock(key);
      tm0.commit();

      // Let the rebalance finish
      sequencer.advance("tx:after_commit");

      TestingUtil.waitForRehashToComplete(caches(CACHE_NAME));
      assertEquals(finalTopologyId, stm0.getCacheTopology().getTopologyId());

      // Check for stale locks
      final TransactionTable tt0 = TestingUtil.extractComponent(cache0, TransactionTable.class);
      final TransactionTable tt1 = TestingUtil.extractComponent(cache1, TransactionTable.class);
      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            return tt0.getLocalTxCount() == 0 && tt1.getRemoteTxCount() == 0;
         }
      });

      sequencer.stop();
   }
      // ... (snippet truncated)

      Object k7 = new MagicKey("k8", cache(c1), cache(a0));

      final Object[] allKeys = new Object[] {k0, k1, k2, k3, k4, k5, k6, k7};
      for (Object k : allKeys) cache(a0).put(k, k);

      StateSequencer ss = new StateSequencer();
      ss.logicalThread("main", "main:st_in_progress", "main:2nd_node_left", "main:cluster_unavailable");

      final StateTransferManager stm0 = advancedCache(a0).getComponentRegistry().getStateTransferManager();
      final int initialTopologyId = stm0.getCacheTopology().getTopologyId();
      StateSequencerUtil.advanceOnInboundRpc(ss, manager(a1), new CommandMatcher() {
         @Override
         public boolean accept(ReplicableCommand command) {
            if (!(command instanceof StateResponseCommand))
               return false;
            StateResponseCommand responseCommand = (StateResponseCommand) command;
            return initialTopologyId < responseCommand.getCommandId();
         }
      }).before("main:st_in_progress", "main:cluster_unavailable");

      // Prepare for rebalance. Manager a1 will request state from c0 for segment 2
      cchf.setMembersToUse(advancedCache(a0).getRpcManager().getTransport().getMembers());
      cchf.setOwnerIndexes(new int[]{a0, a1}, new int[]{a1, c0},
            new int[]{c0, a1}, new int[]{c0, a0});

      Address missing = address(c1);
      log.tracef("Before killing node %s", missing);
      crashCacheManagers(manager(c1));
      installNewView(advancedCache(a0).getRpcManager().getTransport().getMembers(), missing, manager(a0), manager(a1), manager(c0));

      ss.enter("main:2nd_node_left");

      missing = address(c0);
      log.tracef("Killing 2nd node %s", missing);
      crashCacheManagers(manager(c0));
      installNewView(advancedCache(a0).getRpcManager().getTransport().getMembers(), missing, manager(a0), manager(a1));

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            PartitionHandlingManager phm0 = TestingUtil.extractComponent(cache(a0), PartitionHandlingManager.class);
            return phm0.getAvailabilityMode() == expectedAvailabilityMode;
         }
      });
      ss.exit("main:2nd_node_left");

      eventually(new Condition() {
         @Override
         public boolean isSatisfied() throws Exception {
            log.trace("Testing condition");
View Full Code Here

      c.transaction().lockingMode(LockingMode.PESSIMISTIC);
      return c;
   }

   public void testPutStartedBeforeRebalance() throws Exception {
      sequencer = new StateSequencer();
      sequencer.logicalThread("tx", "tx:perform_op", "tx:check_locks", "tx:before_commit", "tx:after_commit");
      sequencer.logicalThread("rebalance", "rebalance:before_get_tx", "rebalance:after_get_tx",
            "rebalance:before_confirm", "rebalance:end");
      sequencer.order("tx:perform_op", "rebalance:before_get_tx", "rebalance:after_get_tx", "tx:check_locks",
            "rebalance:before_confirm", "rebalance:end", "tx:before_commit");
      // ... (snippet truncated)
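
Merging each logical thread's implicit ordering with the explicit order(...) call gives this fragment a single permitted interleaving (a derived reading, not output from the harness):

      // tx:perform_op -> rebalance:before_get_tx -> rebalance:after_get_tx
      //    -> tx:check_locks -> rebalance:before_confirm -> rebalance:end
      //    -> tx:before_commit -> tx:after_commit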
