// Build the node-id -> set of (replica type, partition id) buckets for the
// "test-readonly-fetchfiles" store across the whole cluster.
Map<Integer, Set<Pair<Integer, Integer>>> buckets = ROTestUtils.getNodeIdToAllPartitions(cluster,
StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs,
"test-readonly-fetchfiles"),
true);
for(Node node: cluster.getNodes()) {
ReadOnlyStorageEngine store = (ReadOnlyStorageEngine) getStore(node.getId(),
"test-readonly-fetchfiles");
// Create list of buckets ( replica to partition ) for this node only.
Set<Pair<Integer, Integer>> nodeBucketsSet = buckets.get(node.getId());
HashMap<Integer, List<Integer>> nodeBuckets = ROTestUtils.flattenPartitionTuples(nodeBucketsSet);
// Split the buckets into primary and replica buckets.
// Key 0 is treated as the primary replica type; all other keys are replicas.
HashMap<Integer, List<Integer>> primaryNodeBuckets = Maps.newHashMap();
primaryNodeBuckets.put(0, nodeBuckets.get(0));
int primaryPartitions = nodeBuckets.get(0).size();
// Copy the full map, then drop the primary entry so only replicas remain.
HashMap<Integer, List<Integer>> replicaNodeBuckets = Maps.newHashMap(nodeBuckets);
replicaNodeBuckets.remove(0);
// Total partition count summed over every replica type (excluding primary).
int replicaPartitions = 0;
for(List<Integer> partitions: replicaNodeBuckets.values()) {
replicaPartitions += partitions.size();
}
// Generate data...
// NOTE(review): versionId is defined outside this fragment — presumably the
// enclosing test advances it per iteration; confirm against the full method.
File newVersionDir = new File(store.getStoreDirPath(), "version-"
+ Long.toString(versionId));
Utils.mkdirs(newVersionDir);
generateROFiles(numChunks, indexSize, dataSize, nodeBuckets, newVersionDir);
// Swap it...
store.swapFiles(newVersionDir.getAbsolutePath());
// Check if everything got mmap-ed correctly...
HashMap<Object, Integer> chunkIdToNumChunks = store.getChunkedFileSet()
.getChunkIdToNumChunks();
for(Object bucket: chunkIdToNumChunks.keySet()) {
// Unchecked cast: keys are raw Object in the chunked-file-set API, but this
// test assumes they are Pair<Integer, Integer> (partition, replica) tuples.
Pair<Integer, Integer> partitionToReplicaBucket = (Pair<Integer, Integer>) bucket;
// Flip the pair from (partition, replica) to (replica, partition) ordering.
Pair<Integer, Integer> replicaToPartitionBucket = Pair.create(partitionToReplicaBucket.getSecond(),
partitionToReplicaBucket.getFirst());