final int numberOfRelocations = 1;
final int numberOfWriters = 2;
final String node1 = cluster().startNode();
BlobIndices blobIndices = cluster().getInstance(BlobIndices.class, node1);
logger.trace("--> creating test index ...");
Settings indexSettings = ImmutableSettings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
blobIndices.createBlobTable("test", indexSettings).get();
logger.trace("--> starting [node2] ...");
final String node2 = cluster().startNode();
ensureGreen();
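// shared state for the writer threads: unique ids for generated files,
// a counter of successful uploads and a flag that signals shutdown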
final AtomicLong idGenerator = new AtomicLong();
final AtomicLong indexCounter = new AtomicLong();
final AtomicBoolean stop = new AtomicBoolean(false);
Thread[] writers = new Thread[numberOfWriters];
final CountDownLatch stopLatch = new CountDownLatch(writers.length);
logger.trace("--> starting {} blob upload threads", writers.length);
final List<String> uploadedDigests = Collections.synchronizedList(new ArrayList<String>(writers.length));
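// each writer uploads generated files in a loop until the stop flag is set,
// recording the digest of every successful upload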
for (int i = 0; i < writers.length; i++) {
    final int indexerId = i;
    writers[i] = new Thread() {
        @Override
        public void run() {
            try {
                logger.trace("**** starting blob upload thread {}", indexerId);
                while (!stop.get()) {
                    long id = idGenerator.incrementAndGet();
                    String digest = uploadFile(cluster().client(node1), genFile(id));
                    uploadedDigests.add(digest);
                    indexCounter.incrementAndGet();
                }
                logger.trace("**** done indexing thread {}", indexerId);
            } catch (Exception e) {
                logger.warn("**** failed indexing thread {}", e, indexerId);
            } finally {
                stopLatch.countDown();
            }
        }
    };
    writers[i].start();
}
logger.trace("--> waiting for 2 blobs to be uploaded ...");
while (uploadedDigests.size() < 2) {
    Thread.sleep(10);
}
logger.trace("--> 2 blobs uploaded");
// increase time between chunks in order to make sure that the upload is taking place while relocating
timeBetweenChunks.set(10);
logger.trace("--> starting relocations...");
for (int i = 0; i < numberOfRelocations; i++) {
    String fromNode = (i % 2 == 0) ? node1 : node2;
    String toNode = node1.equals(fromNode) ? node2 : node1;
    logger.trace("--> START relocate the shard from {} to {}", fromNode, toNode);
    cluster().client(node1).admin().cluster().prepareReroute()
            .add(new MoveAllocationCommand(new ShardId(BlobIndices.fullIndexName("test"), 0), fromNode, toNode))
            .execute().actionGet();
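    // wait until the relocation has finished, as seen from both nodes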
    ClusterHealthResponse clusterHealthResponse = cluster().client(node1).admin().cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForRelocatingShards(0)
            .setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
    clusterHealthResponse = cluster().client(node2).admin().cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForRelocatingShards(0)
            .setTimeout(ACCEPTABLE_RELOCATION_TIME).execute().actionGet();
    assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
    logger.trace("--> DONE relocate the shard from {} to {}", fromNode, toNode);
}
logger.trace("--> done relocations");
logger.trace("--> marking and waiting for upload threads to stop ...");
timeBetweenChunks.set(0);
stop.set(true);
stopLatch.await(60, TimeUnit.SECONDS);
logger.trace("--> uploading threads stopped");
logger.trace("--> expected {} got {}", indexCounter.get(), uploadedDigests.size());
assertEquals(indexCounter.get(), uploadedDigests.size());
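// the shard now lives on node2; verify that every uploaded blob is
// present there and non-empty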
blobIndices = cluster().getInstance(BlobIndices.class, node2);
for (String digest : uploadedDigests) {
    BlobShard blobShard = blobIndices.localBlobShard(BlobIndices.fullIndexName("test"), digest);
    long length = blobShard.blobContainer().getFile(digest).length();
    assertThat(length, greaterThanOrEqualTo(1L));
}
}