If the logger is currently enabled for the FINEST message level, the given message is forwarded to all the registered output Handler objects.
@param msg The string message (or a key in the message catalog)
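The Hazelcast snippets below all follow the same guard-then-log idiom: check whether FINEST is enabled before building the message, so the string concatenation is skipped when the level is off. As a minimal, self-contained sketch of that idiom using plain java.util.logging (the class name, partition id, and message here are invented for illustration):

import java.util.logging.Level;
import java.util.logging.Logger;

public class FinestLogExample {

    private static final Logger LOGGER = Logger.getLogger(FinestLogExample.class.getName());

    public static void main(String[] args) {
        int partitionId = 42;
        // Guard the call so the message is only concatenated when FINEST is enabled;
        // otherwise the string-building cost would be paid for a message that is dropped.
        if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.finest("Syncing replica for partition: " + partitionId);
        }
    }
}

Hazelcast's ILogger exposes the same check as isFinestEnabled(), which is why each snippet below wraps logger.finest(...) in that guard whenever the message is built by concatenation.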
ILogger logger = getLogger();
// This member is not the replica owner: log the mismatch only when FINEST is
// enabled, then ask the caller to retry against the correct owner.
if (!nodeEngine.getThisAddress().equals(owner)) {
    if (logger.isFinestEnabled()) {
        logger.finest("Wrong target! " + toString() + " cannot be processed! Target should be: " + owner);
    }
    sendRetryResponse();
    return;
}
    return;
}
// Bail out when the parallel replica-sync limit has been reached; log the
// rejection at FINEST and ask the caller to retry later.
if (!partitionService.incrementReplicaSyncProcessCount()) {
    if (logger.isFinestEnabled()) {
        logger.finest("Max parallel replication process limit exceeded! Could not run replica sync -> "
                + toString());
    }
    sendRetryResponse();
    return;
}
// Build the sync response and send it back to the caller, noting the target
// partition and replica at FINEST.
ReplicaSyncResponse syncResponse = createResponse(data);
Address target = getCallerAddress();
ILogger logger = getLogger();
if (logger.isFinestEnabled()) {
    logger.finest("Sending sync response to -> " + target + " for partition: " + getPartitionId()
            + ", replica: " + getReplicaIndex());
}
OperationService operationService = nodeEngine.getOperationService();
operationService.send(syncResponse, target);
}
private void logNoReplicaDataFound(int partitionId, int replicaIndex) {
    NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
    ILogger logger = nodeEngine.getLogger(getClass());
    if (logger.isFinestEnabled()) {
        logger.finest("No replica data is found for partition: " + partitionId + ", replica: " + replicaIndex);
    }
}

@Override
public void afterRun() throws Exception {
final Address owner = partition.getReplicaAddress(getReplicaIndex());
// Unlike the retry path above, this validation step simply marks the
// operation invalid when this member is not the expected replica owner.
if (!nodeEngine.getThisAddress().equals(owner)) {
    valid = false;
    final ILogger logger = getLogger();
    if (logger.isFinestEnabled()) {
        logger.finest("Wrong target! " + toString() + " cannot be processed! Target should be: " + owner);
    }
}
}

@Override
private void logApplyReplicaSync(int partitionId, int replicaIndex) {
    NodeEngineImpl nodeEngine = (NodeEngineImpl) getNodeEngine();
    ILogger logger = nodeEngine.getLogger(getClass());
    if (logger.isFinestEnabled()) {
        logger.finest("Applying replica sync for partition: " + partitionId + ", replica: " + replicaIndex);
    }
}

@Override
// Wait for each future with a deadline: a timeout is routine enough to log
// only at FINEST, while any other failure is surfaced as a warning.
for (Future future : futures) {
    try {
        future.get(AWAIT_COMPLETION_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
        logger.finest(e);
    } catch (Exception e) {
        logger.warning(e);
    }
}
}
private void awaitCompletion(Collection<Future> futures) {
    try {
        FutureUtil.waitWithDeadline(futures, AWAIT_COMPLETION_TIMEOUT_SECONDS, TimeUnit.SECONDS,
                exceptionHandler);
    } catch (TimeoutException e) {
        // Hitting the deadline is expected under load, so it is logged only at FINEST.
        ILogger logger = nodeEngine.getLogger(getClass());
        logger.finest(e);
    }
}

private InternalCompletableFuture invoke(Operation operation, Data key) {
    int partitionId = nodeEngine.getPartitionService().getPartitionId(key);
if (throwable instanceof ExecutionException) {
    final ClusterServiceImpl clusterService = getService();
    final NodeEngineImpl nodeEngine = clusterService.getNodeEngine();
    final ILogger logger = nodeEngine.getLogger(FinalizeJoinOperation.class);
    if (logger.isFinestEnabled()) {
        // The throwable is passed as the second argument so the stack trace is kept.
        logger.finest("Error while executing post-join operations -> "
                + throwable.getClass().getSimpleName() + "[" + throwable.getMessage() + "]", throwable);
    }
}
}
}
        mapReduceService.processRequest(address, operation, name);
    } catch (Exception ignore) {
        // We can ignore this exception since we just want to cancel the job
        // and the member may be crashed or unreachable in some way
        ILogger logger = mapReduceService.getNodeEngine().getLogger(JobSupervisor.class);
        logger.finest("Remote node may already be down", ignore);
    }
}
}

private void processReducerFinished(final ReducingFinishedNotification notification) {