partition1.setBindIntoJndi(false);
partition1.create();
partition1.start();
DistributedReplicantManager drm1 = partition1.getDistributedReplicantManager();
// Give the first node time to finish connecting and stabilize its view
sleepThread(10000);
// Use a different stack name with the same config to avoid singleton conflicts
stackName = "tunnel2";
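// Build a second channel factory for node2; ExposeChannels/ExposeProtocols
// are disabled, presumably to skip JMX registration in this standalone test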
JChannelFactory factory2 = new JChannelFactory();
factory2.setMultiplexerConfig(muxFile);
factory2.setNamingServicePort(1099);
factory2.setNodeName("node2");
factory2.setExposeChannels(false);
factory2.setExposeProtocols(false);
factory2.create();
factory2.start();
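// REPL_SYNC cache configuration bound to the second multiplexer stack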
Configuration cacheConfig2 = new Configuration();
cacheConfig2.setMultiplexerStack(stackName);
cacheConfig2.setCacheMode("REPL_SYNC");
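// Register the configuration and wire up the CacheManager and the
// HAPartition cache handler that partition2 will depend on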
DependencyInjectedConfigurationRegistry registry2 = new DependencyInjectedConfigurationRegistry();
registry2.registerConfiguration("config2", cacheConfig2);
CacheManager cacheManager2 = new CacheManager(registry2, factory2);
HAPartitionCacheHandlerImpl cacheHandler2 = new HAPartitionCacheHandlerImpl();
cacheHandler2.setCacheManager(cacheManager2);
cacheHandler2.setCacheConfigName("config2");
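// DistributedState service for node2, backed by the same cache handler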
DistributedStateImpl ds2 = new DistributedStateImpl();
ds2.setCacheHandler(cacheHandler2);
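// Second ClusterPartition with the same partition name so both nodes
// join a single cluster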
partition2 = new ClusterPartition();
partition2.setPartitionName(partitionName);
partition2.setCacheHandler(cacheHandler2);
partition2.setStateTransferTimeout(30000);
partition2.setMethodCallTimeout(60000);
partition2.setDistributedStateImpl(ds2);
partition2.setBindIntoJndi(false);
partition2.create();
partition2.start();
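// Give the two nodes time to discover each other and form a shared view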
DistributedReplicantManager drm2 = partition2.getDistributedReplicantManager();
sleepThread(10000);
// confirm that each partition contains two nodes
assertEquals("Partition1 should contain two nodes; ", 2, partition1.getCurrentView().size());
assertEquals("Partition2 should contain two nodes; ", 2, partition2.getCurrentView().size());
drm1.add(SERVICEA, "valueA1");
drm2.add(SERVICEA, "valueA2");
drm2.add(SERVICEB, "valueB2");
// test that only one node is the master replica for serviceA
assertTrue("ServiceA must have a master replica",
drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
assertTrue("ServiceA must have a single master replica",
drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
// ServiceB should only be a master replica on partition2
assertFalse("ServiceB should not be a master replica on partition1",
drm1.isMasterReplica(SERVICEB));
assertTrue("ServiceB must have a master replica on partition2",
drm2.isMasterReplica(SERVICEB));
// confirm that each partition contains correct DRM replicants for services A and B
assertEquals("Partition1 should contain two DRM replicants for serviceA; ",
2, drm1.lookupReplicants(SERVICEA).size());
assertEquals("Partition2 should contain two DRM replicants for serviceA; ",
2, drm2.lookupReplicants(SERVICEA).size());
assertEquals("Partition1 should contain one DRM replicant for serviceB; ",
1, drm1.lookupReplicants(SERVICEB).size());
assertEquals("Partition2 should contain one DRM replicant for serviceB; ",
1, drm2.lookupReplicants(SERVICEB).size());
// simulate a split of the partition
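// The "tunnel2" stack presumably uses JGroups TUNNEL, which routes all
// traffic through the GossipRouter, so stopping the router cuts the nodes
// off from each other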
log.info("DRMTestCase.testIsMasterReplica() - stopping GossipRouter");
router.stop();
sleepThread(15000);
// confirm that each partition contains one node
assertEquals("Partition1 should contain one node after split; ",
1, partition1.getCurrentView().size());
assertEquals("Partition2 should contain one node after split; ",
1, partition2.getCurrentView().size());
// confirm that each node is a master replica for serviceA after the split
assertTrue("ServiceA should be a master replica on partition1 after split",
drm1.isMasterReplica(SERVICEA));
assertTrue("ServiceA should be a master replica on partition2 after split",
drm2.isMasterReplica(SERVICEA));
// ServiceB should still only be a master replica on partition2 after split
assertFalse("ServiceB should not be a master replica on partition1 after split",
drm1.isMasterReplica(SERVICEB));
assertTrue("ServiceB must have a master replica on partition2 after split",
drm2.isMasterReplica(SERVICEB));
// Remove ServiceA replicant from partition1
drm1.remove(SERVICEA);
// test that this node is not the master replica
assertFalse("partition1 is not master replica after dropping ServiceA replicant",
drm1.isMasterReplica(SERVICEA));
// Restore the local replicant
drm1.add(SERVICEA, "valueA1a");
// simulate a merge
log.info("DRMTestCase.testIsMasterReplica() - restarting GossipRouter");
router.start();
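// With the router back, JGroups should detect the two subgroups and merge
// them; the DRM then has to reconcile the duplicate serviceA masters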
// it seems to take more than 15 seconds for the merge to take effect
sleepThread(30000);
assertTrue("GossipRouter should be started after restart", router.isStarted());
// confirm that each partition contains two nodes again
assertEquals("Partition1 should contain two nodes after merge; ",
2, partition1.getCurrentView().size());
assertEquals("Partition2 should contain two nodes after merge; ",
2, partition2.getCurrentView().size());
// test that only one node is the master replica for serviceA after merge
assertTrue("ServiceA must have a master replica after merge",
drm1.isMasterReplica(SERVICEA) || drm2.isMasterReplica(SERVICEA));
assertTrue("ServiceA must have a single master replica after merge",
drm1.isMasterReplica(SERVICEA) != drm2.isMasterReplica(SERVICEA));
// ServiceB should only be a master replica on partition2 after merge
assertFalse("ServiceB should not be a master replica on partition1 after merge",
drm1.isMasterReplica(SERVICEB));
assertTrue("ServiceB must have a master replica on partition2 after merge",
drm2.isMasterReplica(SERVICEB));
// confirm that each partition contains correct DRM replicants for services A and B after merge
assertEquals("Partition1 should contain two DRM replicants for serviceA after merge; ",
2, drm1.lookupReplicants(SERVICEA).size());
assertEquals("Partition2 should contain two DRM replicants for serviceA after merge; ",
2, drm2.lookupReplicants(SERVICEA).size());
assertEquals("Partition1 should contain one DRM replicant for serviceB after merge; ",
1, drm1.lookupReplicants(SERVICEB).size());
assertEquals("Partition2 should contain one DRM replicant for serviceB after merge; ",
1, drm2.lookupReplicants(SERVICEB).size());
partition1.stop();
partition2.stop();
}
finally