final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl,
maxParallel,
finalCluster);
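// Populate the RW store under the original cluster topology before the rebalance starts.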
populateData(currentCluster, rwStoreDefWithReplication);
final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
// the plan would cause these partitions to move:
// Partition : Donor -> Stealer
//
// p2 (Z-SEC) : s1 -> s0
// p3-6 (Z-PRI) : s1 -> s2
// p7 (Z-PRI) : s0 -> s2
//
// p5 (Z-SEC): s4 -> s3
// p6 (Z-PRI): s4 -> s5
//
// Therefore, rebalancing will run on servers 0, 2, 3, & 5
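// Sample keys belonging to partition 6 from node 1 (a donor for that partition);
// per the plan above, these keys will move during the rebalance.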
final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient,
1,
rwStoreDefWithReplication.getName(),
Arrays.asList(6),
20);
assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
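// baselineTuples/baselineVersions track the expected value and vector clock for
// every key once the proxy writes below have been applied.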
final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
for(String key: baselineTuples.keySet()) {
baselineVersions.put(key, new VectorClock());
}
final CountDownLatch latch = new CountDownLatch(2);
// start proxy write operations.
executors.execute(new Runnable() {
@Override
public void run() {
SocketStoreClientFactory factory = null;
try {
// wait for the rebalancing to begin
List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0),
serverMap.get(2),
serverMap.get(3),
serverMap.get(5));
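// Poll each server's metadata store and remove it from the list once it reports
// REBALANCING_MASTER_SERVER; when the list is empty, all expected servers have
// entered the rebalancing state.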
while(!rebalancingComplete.get()) {
Iterator<VoldemortServer> serverIterator = serverList.iterator();
while(serverIterator.hasNext()) {
VoldemortServer server = serverIterator.next();
if(ByteUtils.getString(server.getMetadataStore()
.get(MetadataStore.SERVER_STATE_KEY,
null)
.get(0)
.getValue(),
"UTF-8")
.compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
logger.info("Server " + server.getIdentityNode().getId()
+ " transitioned into REBALANCING MODE");
serverIterator.remove();
}
}
if(serverList.size() == 0) {
rebalancingStarted.set(true);
break;
}
}
if(rebalancingStarted.get()) {
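// Build a zone-1 store client (lazy initialization off, generous socket timeout)
// to issue writes while the servers are rebalancing.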
factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster,
0))
.setEnableLazy(false)
.setSocketTimeout(120,
TimeUnit.SECONDS)
.setClientZoneId(1));
final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW,
null,
factory,
3);
// Now perform some writes and track the end
// state of the changed keys. Initially, every
// key starts with an empty vector clock.
for(ByteArray movingKey: movingKeysList) {
try {
String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
String valStr = "proxy_write";
storeClientRW.put(keyStr, valStr);
baselineTuples.put(keyStr, valStr);
// all these keys will end up with a [5:1] vector
// clock, since node 5 is the new pseudo master
baselineVersions.get(keyStr)
.incrementVersion(5, System.currentTimeMillis());
proxyWritesDone.set(true);
if(rebalancingComplete.get()) {
break;
}
} catch(InvalidMetadataException e) {
// let this go
logger.error("Encountered an invalid metadata exception.. ", e);
}
}
}
} catch(Exception e) {
logger.error("Exception in proxy write thread..", e);
exceptions.add(e);
} finally {
if(factory != null)
factory.close();
latch.countDown();
}
}
});
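// Run the actual rebalance in a second thread; when it finishes it sets
// rebalancingComplete and counts down the latch so the writer thread can stop.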
executors.execute(new Runnable() {
@Override
public void run() {
try {
rebalanceKit.rebalance();
} catch(Exception e) {
logger.error("Error in rebalancing... ", e);
exceptions.add(e);
} finally {
rebalancingComplete.set(true);
latch.countDown();
}
}
});
latch.await();
executors.shutdown();
executors.awaitTermination(300, TimeUnit.SECONDS);
assertEquals("Client did not see all server transition into rebalancing state",
rebalancingStarted.get(),
true);
assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
checkEntriesPostRebalance(updatedCurrentCluster,
finalCluster,
Lists.newArrayList(rwStoreDefWithReplication),
Arrays.asList(0, 1, 2, 3, 4, 5),
baselineTuples,
baselineVersions);
checkConsistentMetadata(finalCluster, serverList);
// check that no exceptions were raised by the worker threads
if(exceptions.size() > 0) {
for(Exception e: exceptions) {
e.printStackTrace();
}
fail("Should not see any exceptions.");
}
// check that the proxy writes were made to the original donor, node 1
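// Roll the cluster metadata on every node back to the original cluster; the clock
// below carries an entry for every node so the rollback version supersedes the
// cluster.xml written during the rebalance.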
List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
for(Integer nodeid: serverList)
clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
for(Integer nodeid: serverList)
adminClient.metadataMgmtOps.updateRemoteCluster(nodeid,
currentCluster,
clusterXmlClock);
adminClient.setAdminClientCluster(currentCluster);
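// Node 1 was the original donor for the sampled keys, so its copy of the store
// should now hold the proxied values.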
checkForTupleEquivalence(adminClient,
1,
testStoreNameRW,
movingKeysList,
baselineTuples,