// In practice, we can only guarantee those two points if the filter queries the head of the
// partition (and if it actually counts CQL3 rows, since that is what we cache; comparing the
// filter count to the 'rows to cache' setting would be meaningless otherwise).
if (filter.filter.isHeadFilter() && filter.filter.countCQL3Rows(metadata.comparator))
{
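// A head filter is a forward slice starting at the beginning of the partition, so the filter is
// necessarily a SliceQueryFilter and the cast below is expected to be safe.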
SliceQueryFilter sliceFilter = (SliceQueryFilter)filter.filter;
int rowsToCache = metadata.getCaching().rowCache.rowsToCache;
SliceQueryFilter cacheSlice = readFilterForCache();
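// readFilterForCache() is defined elsewhere in this class; judging from its use below, it should
// return a fresh head slice over the whole partition, limited to 'rows to cache' and counting CQL3
// rows. A minimal sketch, assuming SliceQueryFilter's (slices, reversed, count, compositesToGroup)
// constructor:
//   new SliceQueryFilter(ColumnSlice.ALL_COLUMNS_ARRAY, false,
//                        metadata.getCaching().rowCache.rowsToCache,
//                        metadata.clusteringColumns().size());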
QueryFilter cacheFilter = new QueryFilter(filter.key, name, cacheSlice, filter.timestamp);
// If the filter count is less than the number of rows to cache, we extend the read (by querying
// with the cache filter instead) to make sure it covers everything that must be cached. If that
// count is greater than or equal to the number of rows to cache, we query with the original filter
// and extract what needs to be cached from the result afterwards.
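// For example, with rowsToCache = 100: a head query asking for 10 rows is widened to read 100 rows
// (which get cached) and then trimmed back to the 10 requested; a head query asking for 200 rows
// reads 200 rows, and the first 100 are extracted for the cache.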
if (sliceFilter.count < rowsToCache)
{
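// Passing Integer.MIN_VALUE as gcBefore means no tombstone qualifies for collection, so the copy
// we cache keeps all deletion info.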
toCache = getTopLevelColumns(cacheFilter, Integer.MIN_VALUE);
if (toCache != null)
{
Tracing.trace("Populating row cache ({} rows cached)", cacheSlice.lastCounted());
data = filterColumnFamily(toCache, filter);
}
}
else
{
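// The original filter already spans at least 'rows to cache' rows, so read with it directly.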
data = getTopLevelColumns(filter, Integer.MIN_VALUE);
if (data != null)
{
// The filter limit was at least the number of rows to cache. But if the filter had a non-empty
// finish bound, we may still have fetched fewer rows than need to be cached, in which case we
// must not cache the result (otherwise a later cache hit would wrongly assume the whole head of
// the partition is cached, which would not be the case).
if (sliceFilter.finish().isEmpty() || sliceFilter.lastCounted() >= rowsToCache)
{
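// Extract the head of the result (up to rowsToCache CQL3 rows) as the copy to cache.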
toCache = filterColumnFamily(data, cacheFilter);
Tracing.trace("Caching {} rows (out of {} requested)", cacheSlice.lastCounted(), sliceFilter.count);
}
else
{
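// Leave toCache null so nothing is cached; 'data' still holds the fetched rows for the caller.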
Tracing.trace("Not populating row cache, not enough rows fetched ({} fetched but {} required for the cache)", sliceFilter.lastCounted(), rowsToCache);
}