boolean terminatedEarly = false;
Date startTime = new Date();
logger.info("Started streaming slop pusher job at " + startTime);
SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;
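// Lazily create the admin client used to push slops to remote
// nodes; a single connection per node is enough for this job.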
if(adminClient == null) {
adminClient = new AdminClient(cluster,
new AdminClientConfig().setMaxConnectionsPerNode(1),
new ClientConfig());
}
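// Optionally bail out early: when enough zones have no reachable
// nodes, pushing slops is mostly futile for this run.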
if(voldemortConfig.getSlopZonesDownToTerminate() > 0) {
// Populating the zone mapping for early termination
zoneMapping.clear();
for(Node n: cluster.getNodes()) {
if(failureDetector.isAvailable(n)) {
Set<Integer> nodes = zoneMapping.get(n.getZoneId());
if(nodes == null) {
nodes = Sets.newHashSet();
zoneMapping.put(n.getZoneId(), nodes);
}
nodes.add(n.getId());
}
}
// Check how many zones are down
int zonesDown = 0;
for(Zone zone: cluster.getZones()) {
Set<Integer> availableNodes = zoneMapping.get(zone.getId());
if(availableNodes == null || availableNodes.isEmpty())
zonesDown++;
}
// Terminate early
if(voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size()
&& zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
logger.info("Completed streaming slop pusher job at " + startTime
+ " early because " + zonesDown + " zones are down");
stopAdminClient();
return;
}
}
// Clearing the statistics
AtomicLong attemptedPushes = new AtomicLong(0);
for(Node node: cluster.getNodes()) {
attemptedByNode.put(node.getId(), 0L);
succeededByNode.put(node.getId(), 0L);
}
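// Snapshot the defined store names so isSlopDead can flag slops
// whose store no longer exists.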
Set<String> storeNames = StoreDefinitionUtils.getStoreNamesSet(metadataStore.getStoreDefList());
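// Acquire the repair permit (assumed to keep this job from running
// concurrently with other scan/repair jobs) for the whole scan.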
acquireRepairPermit();
try {
StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
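// Stream over every slop entry persisted in the local slop store.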
iterator = slopStore.entries();
while(iterator.hasNext()) {
Pair<ByteArray, Versioned<Slop>> keyAndVal;
try {
keyAndVal = iterator.next();
Versioned<Slop> versioned = keyAndVal.getSecond();
// Track the scan progress
if(this.streamStats != null) {
this.streamStats.reportStreamingSlopScan();
}
// Retrieve the node
int nodeId = versioned.getValue().getNodeId();
// check for dead slops
if(isSlopDead(cluster, storeNames, versioned.getValue())) {
handleDeadSlop(slopStorageEngine, keyAndVal);
// Move on to the next slop; handleDeadSlop either
// deleted or ignored it.
continue;
}
Node node = cluster.getNodeById(nodeId);
long attemptedSoFar = attemptedPushes.incrementAndGet();
Long attempted = attemptedByNode.get(nodeId);
attemptedByNode.put(nodeId, attempted + 1L);
if(attemptedSoFar % 10000 == 0)
logger.info("Attempted pushing " + attemptedSoFar + " slops");
if(logger.isTraceEnabled())
logger.trace("Pushing slop for " + versioned.getValue().getNodeId()
+ " and store " + versioned.getValue().getStoreName()
+ " of key: " + versioned.getValue().getKey());
if(failureDetector.isAvailable(node)) {
SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
if(slopQueue == null) {
// No previous slop queue, add one
slopQueue = new SynchronousQueue<Versioned<Slop>>();
slopQueues.put(nodeId, slopQueue);
consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId,
slopQueue,
slopStorageEngine)));
}
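// Hand the slop to the consumer; SynchronousQueue.offer blocks
// until a consumer takes the element or the timeout expires.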
boolean offered = slopQueue.offer(versioned,
voldemortConfig.getClientRoutingTimeoutMs(),
TimeUnit.MILLISECONDS);
if(!offered) {
if(logger.isDebugEnabled())
logger.debug("No consumer appeared for slop in "
+ voldemortConfig.getClientRoutingTimeoutMs()
+ " ms");
}
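// Throttle on bytes read so the scan stays within the
// configured read bandwidth.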
readThrottler.maybeThrottle(nBytesRead(keyAndVal));
} else {
if(logger.isTraceEnabled())
logger.trace(node + " declared down, won't push slop");
}
} catch(RejectedExecutionException e) {
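// The consumer executor could not accept a new SlopConsumer
// task; fail the run rather than silently dropping slops.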
throw new VoldemortException("Ran out of threads in executor", e);
}
}
} catch(InterruptedException e) {
logger.warn("Interrupted exception", e);
terminatedEarly = true;
} catch(Exception e) {
logger.error("Unexpected exception in the streaming slop pusher job", e);
terminatedEarly = true;
} finally {
try {
if(iterator != null)
iterator.close();
} catch(Exception e) {
logger.warn("Failed to close iterator cleanly as database might be closed", e);
}
// Adding the poison pill
for(SynchronousQueue<Versioned<Slop>> slopQueue: slopQueues.values()) {
try {
slopQueue.put(END);
} catch(InterruptedException e) {
logger.warn("Error putting poison pill", e);
}
}
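// Wait for all consumers to finish draining their queues before
// reading the per-node counters.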
for(Future<?> result: consumerResults) {
try {
result.get();
} catch(Exception e) {
logger.warn("Exception in consumer", e);
}
}
// Only if exception didn't take place do we update the counts
if(!terminatedEarly) {
Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
for(int nodeId: succeededByNode.keySet()) {
logger.info("Slops to node " + nodeId + " - Succeeded - "
+ succeededByNode.get(nodeId) + " - Attempted - "
+ attemptedByNode.get(nodeId));
outstanding.put(nodeId,
attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
}
slopStorageEngine.resetStats(outstanding);
logger.info("Completed streaming slop pusher job which started at " + startTime);
} else {
for(int nodeId: succeededByNode.keySet()) {
logger.info("Slops to node " + nodeId + " - Succeeded - "
+ succeededByNode.get(nodeId) + " - Attempted - "