    return result;
}
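/**
 * Creates a single Oak instance backed by a DocumentMK that connects to
 * MongoDB using the given cluster id.
 */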
@Override
public Oak getOak(int clusterId) throws Exception {
    // One Mongo connection per Oak instance, identified by its cluster id
    MongoConnection mongo = new MongoConnection(host, port, dbName);
    BlobStore blobStore = getBlobStore();
    DocumentMK.Builder mkBuilder = new DocumentMK.Builder().
            setMongoDB(mongo.getDB()).
            memoryCacheSize(cacheSize).
            setClusterId(clusterId).setLogging(false);
    if (blobStore != null) {
        mkBuilder.setBlobStore(blobStore);
    }
    DocumentMK dmk = mkBuilder.open();
    Oak oak;
    if (useMk) {
        // Access the repository through the MicroKernel API
        oak = new Oak(new KernelNodeStore(dmk, cacheSize));
    } else {
        // Use the DocumentNodeStore directly
        oak = new Oak(dmk.getNodeStore());
    }
    return oak;
}
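/**
 * Creates {@code n} Oak instances, each backed by its own DocumentMK and
 * Mongo connection; the kernels are kept so they can be disposed in
 * {@link #tearDownCluster()}.
 */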
@Override
public Oak[] setUpCluster(int n) throws Exception {
    Oak[] cluster = new Oak[n];
    kernels = new DocumentMK[cluster.length];
    for (int i = 0; i < cluster.length; i++) {
        // Every cluster node gets its own connection and DocumentMK
        MongoConnection mongo = new MongoConnection(host, port, dbName);
        BlobStore blobStore = getBlobStore();
        DocumentMK.Builder mkBuilder = new DocumentMK.Builder().
                setMongoDB(mongo.getDB()).
                memoryCacheSize(cacheSize).
                setClusterId(i).setLogging(false);
        if (blobStore != null) {
            mkBuilder.setBlobStore(blobStore);
        }
        kernels[i] = mkBuilder.open();
        Oak oak;
        if (useMk) {
            oak = new Oak(new KernelNodeStore(kernels[i], cacheSize));
        } else {
            oak = new Oak(kernels[i].getNodeStore());
        }
        cluster[i] = oak;
    }
    return cluster;
}
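/**
 * Disposes all DocumentMK instances and, if {@code dropDBAfterTest} is set,
 * drops the test database and cleans up the blob store.
 */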
@Override
public void tearDownCluster() {
    // Dispose the kernels created in setUpCluster()
    for (DocumentMK kernel : kernels) {
        kernel.dispose();
    }
    if (dropDBAfterTest) {
        try {
            // Drop the test database and clean up the blob store
            MongoConnection mongo =
                    new MongoConnection(host, port, dbName);
            mongo.getDB().dropDatabase();
            mongo.close();
            if (blobStore instanceof CloudBlobStore) {
                ((CloudBlobStore) blobStore).deleteBucket();
            } else if (blobStore instanceof DataStoreBlobStore) {
                ((DataStoreBlobStore) blobStore).clearInUse();
                ((DataStoreBlobStore) blobStore).deleteAllOlderThan(