// The rate limit is in bytes per second; Double.MAX_VALUE (effectively unlimited) is used when throttling
// is disabled by setting hinted_handoff_throttle_in_kb to 0 in cassandra.yaml.
// The configured rate is divided among the other nodes in the cluster, so a recovering node receiving
// hints from every peer at once still sees roughly the configured aggregate rate (CASSANDRA-5272).
int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB()
/ (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1);
RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
delivery:
while (true)
{
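// Read the hints stored for this endpoint one page (pageSize columns) at a time,
// resuming from the last column delivered (startColumn).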
QueryFilter filter = QueryFilter.getSliceFilter(epkey,
new QueryPath(SystemTable.HINTS_CF),
startColumn,
ByteBufferUtil.EMPTY_BYTE_BUFFER,
false,
pageSize);
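// Purge gc-able tombstones from the page so that a page containing nothing but
// already-deleted hints lets pagingFinished() detect the end of delivery.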
ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter),
(int) (System.currentTimeMillis() / 1000));
if (pagingFinished(hintsPage, startColumn))
break;
// check if node is still alive and we should continue delivery process
if (!FailureDetector.instance.isAlive(endpoint))
{
logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed);
return;
}
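// Collect one write response handler per replayed hint so delivery can block on
// acknowledgement of this page's writes before paging further.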
List<WriteResponseHandler> responseHandlers = Lists.newArrayList();
for (final IColumn hint : hintsPage.getSortedColumns())
{
// check if hints delivery has been paused during the process
if (hintedHandOffPaused)
{
logger.debug("Hints delivery process is paused, aborting");
break delivery;
}
// Skip tombstones:
// if we iterate quickly enough, it's possible that we could request a new page in the same millisecond
// in which the local deletion timestamp was generated for the last column in the old page. In that
// case the already-deleted hint still shows up in the resultset as a tombstone, since (even with
// gcgs=0) it is still a "relevant" tombstone that removeDeleted cannot purge yet.
if (!hint.isLive())
continue;
startColumn = hint.name();
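// The composite column name encodes (hint id, messaging version); the column value is the
// RowMutation serialized at that version, so deserialize with the recorded version.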
ByteBuffer[] components = comparator.split(hint.name());
int version = Int32Type.instance.compose(components[1]);
DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
RowMutation rm;
try
{
rm = RowMutation.serializer.deserialize(in, version);
}
catch (UnknownColumnFamilyException e)
{
logger.debug("Skipping delivery of hint for deleted columnfamily", e);
deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp());
continue;
}
catch (IOException e)
{
throw new AssertionError(e);
}
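// Drop hints for data that has since been truncated: a column family truncated after the hint
// was written should not have the hinted data resurrected. Truncation times are cached per
// mutation to avoid repeated lookups when a mutation spans multiple column families.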
Map<UUID, Long> truncationTimesCache = new HashMap<UUID, Long>();
for (UUID cfId : ImmutableSet.copyOf(rm.getColumnFamilyIds()))
{
Long truncatedAt = truncationTimesCache.get(cfId);
if (truncatedAt == null)
{
ColumnFamilyStore cfs = Table.open(rm.getTable()).getColumnFamilyStore(cfId);
truncatedAt = cfs.getTruncationTime();
truncationTimesCache.put(cfId, truncatedAt);
}
if (hint.maxTimestamp() < truncatedAt)
{
logger.debug("Skipping delivery of hint for truncated columnfamily {}" + cfId);
rm = rm.without(cfId);
}
}
if (rm.isEmpty())
{
deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp());
continue;
}
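// Throttle on the serialized size of the mutation so delivery respects the
// per-destination byte rate computed above.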
MessageOut<RowMutation> message = rm.createMessage();
rateLimiter.acquire(message.serializedSize(MessagingService.current_version));
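// Completion callback for the replayed mutation: count the row as delivered once the write succeeds.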
Runnable callback = new Runnable()
{
public void run()
{
rowsReplayed.incrementAndGet();