   if (immediateLockForWriting(bucketId)) {
      if (log.isTraceEnabled()) {
         log.tracef("Adding bucket keyed %s for purging.", bucketId);
      }
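      // read the serialized bucket from the data column and queue it for purging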
      InputStream binaryStream = rs.getBinaryStream(1);
      Bucket bucket = unmarshallBucket(binaryStream);
      bucket.setBucketId(bucketId);
      expiredBuckets.add(bucket);
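      // once a full batch has accumulated, hand it to an asynchronous BucketPurger and start a fresh batch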
      if (expiredBuckets.size() == BATCH_SIZE) {
         ++tasksScheduled;
         ecs.submit(new BucketPurger(expiredBuckets, task, ctx.getMarshaller(), conn, emptyBuckets));
         expiredBuckets = new ArrayList<Bucket>(BATCH_SIZE);
      }
   } else {
      if (log.isTraceEnabled()) {
         log.tracef("Could not acquire write lock for %s; this bucket won't be purged even though it contains expired elements", bucketId);
      }
   }
   // continuously unlock already purged buckets - we don't want to run out of memory
   // by keeping them in an unbounded collection
   tasksCompleted += unlockCompleted(ecs, false); // cannot throw InterruptedException
}
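// submit any remaining buckets as a final, possibly partial batch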
if (!expiredBuckets.isEmpty()) {
   ++tasksScheduled;
   ecs.submit(new BucketPurger(expiredBuckets, task, ctx.getMarshaller(), conn, emptyBuckets));
}
// wait until all tasks have completed
try {
   while (tasksCompleted < tasksScheduled) {
      tasksCompleted += unlockCompleted(ecs, true);
   }
} catch (InterruptedException e) {
   Thread.currentThread().interrupt();
   throw new PersistenceException("Interrupted purging JdbcBinaryStore", e);
}
// when all tasks have completed, we may have up to BATCH_SIZE empty buckets waiting to be deleted
PreparedStatement deletePs = null;
try {
   deletePs = conn.prepareStatement(tableManipulation.getDeleteRowSql());
   Bucket bucket;
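   // delete each emptied bucket's row in a single JDBC batch, releasing its lock as we go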
   while ((bucket = emptyBuckets.poll()) != null) {
      deletePs.setString(1, bucket.getBucketIdAsString());
      deletePs.addBatch();
      unlock(bucket.getBucketId());
   }
   log.tracef("Flushing deletion batch");
   deletePs.executeBatch();
   log.tracef("Flushed deletion batch");
} catch (Exception ex) {