.getContentLength());
int parts = algorithm.getParts();
long chunkSize = algorithm.getChunkSize();
long remaining = algorithm.getRemaining();
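// the slicing algorithm produced at least one full-size chunk, so upload the blob in parts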
if (parts > 0) {
final CommonSwiftClient client = blobstore.getContext().unwrapApi(CommonSwiftClient.class);
final Map<Integer, ListenableFuture<String>> futureParts =
new ConcurrentHashMap<Integer, ListenableFuture<String>>();
final Map<Integer, Exception> errorMap = Maps.newHashMap();
AtomicInteger errors = new AtomicInteger(0);
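// retry budget: at least minRetries, or maxPercentRetries percent of the part count, whichever is larger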
int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
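// a remainder smaller than chunkSize is uploaded as one additional part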
int effectiveParts = remaining > 0 ? parts + 1 : parts;
try {
logger.debug(String.format("initiated multipart upload of %s to container %s" +
" consisting of %d parts (max. retries allowed: %d)",
key, container, effectiveParts, maxRetries));
// a bounded blocking queue caps the number of part uploads running in parallel
ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
CountDownLatch latch = new CountDownLatch(effectiveParts);
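// schedule all full-size parts; activeParts.put(...) blocks once parallelDegree uploads are in flight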
int part;
while ((part = algorithm.getNextPart()) <= parts) {
Integer partKey = Integer.valueOf(part);
activeParts.put(partKey);
prepareUploadPart(container, blob, key, partKey, payload,
algorithm.getNextChunkOffset(), chunkSize, etags,
activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch,
blob2Object);
}
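// upload the trailing partial chunk, if any, as the final part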
if (remaining > 0) {
Integer partKey = Integer.valueOf(part);
activeParts.put(partKey);
prepareUploadPart(container, blob, key, partKey, payload,
algorithm.getNextChunkOffset(), remaining, etags,
activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch,
blob2Object);
}
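// wait until every scheduled part upload has either succeeded or recorded a failure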
latch.await();
// retry failed parts in bounded rounds until they succeed or the retry budget is exhausted
while (errors.get() <= maxRetries && !toRetry.isEmpty()) {
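// each retry round is bounded by the number of queued failures, the accumulated error count and the parallel degree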
int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
CountDownLatch retryLatch = new CountDownLatch(atOnce);
for (int i = 0; i < atOnce; i++) {
Part failedPart = toRetry.poll();
Integer partKey = Integer.valueOf(failedPart.getPart());
activeParts.put(partKey);
prepareUploadPart(container, blob, key, partKey, payload,
failedPart.getOffset(), failedPart.getSize(), etags,
activeParts, futureParts, errors, maxRetries, errorMap, toRetry, retryLatch,
blob2Object);
}
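// wait for this retry round to finish before scheduling the next one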
retryLatch.await();
}
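// abort when the accumulated errors exceed the retry budget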
if (errors.get() > maxRetries) {
throw new BlobRuntimeException(String.format(
"Too many failed parts: %s while multipart upload of %s to container %s",
errors.get(), key, container));
}
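// all parts are in place: write the Swift manifest object that exposes the segments as a single logical object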
String eTag = client.putObjectManifest(container, key);
logger.debug(String.format("multipart upload of %s to container %s" +
" finished successfully with %d retries", key, container, errors.get()));
return eTag;
} catch (Exception ex) {
RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);