.getContentLength());
int parts = algorithm.getParts();
long chunkSize = algorithm.getChunkSize();
long remaining = algorithm.getRemaining();
if (parts > 0) {
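// unwrap the portable BlobStore context to the provider-specific AWSS3Client,
// which exposes the low-level multipart calls (initiate/complete/abort)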
AWSS3Client client = ablobstore
.getContext().unwrap(AWSS3ApiMetadata.CONTEXT_TOKEN).getApi();
String uploadId = null;
final Map<Integer, ListenableFuture<String>> futureParts =
new ConcurrentHashMap<Integer, ListenableFuture<String>>();
final Map<Integer, Exception> errorMap = new ConcurrentHashMap<Integer, Exception>(); // thread-safe: part callbacks may record failures concurrently
AtomicInteger errors = new AtomicInteger(0);
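// retry budget: at least minRetries, growing with the part count for large uploads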
int maxRetries = Math.max(minRetries, parts * maxPercentRetries / 100);
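// a remainder smaller than chunkSize is uploaded as one extra, final part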
int effectiveParts = remaining > 0 ? parts + 1 : parts;
try {
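// register the upload with S3 first; the part uploads and the final
// complete/abort calls all reference this uploadId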
uploadId = client.initiateMultipartUpload(container,
ObjectMetadataBuilder.create().key(key).build()); // TODO md5
logger.debug(String.format("initiated multipart upload of %s to container %s" +
" with uploadId %s consisting from %s part (possible max. retries: %d)",
key, container, uploadId, effectiveParts, maxRetries));
// a bounded blocking queue throttles scheduling: at most parallelDegree parts upload in parallel
ArrayBlockingQueue<Integer> activeParts = new ArrayBlockingQueue<Integer>(parallelDegree);
Queue<Part> toRetry = new ConcurrentLinkedQueue<Part>();
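// part number -> ETag; kept sorted so completeMultipartUpload receives the parts in order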
SortedMap<Integer, String> etags = new ConcurrentSkipListMap<Integer, String>();
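// counted down once per scheduled part as it succeeds or terminally fails (see prepareUploadPart)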
CountDownLatch latch = new CountDownLatch(effectiveParts);
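// schedule every full-size chunk; activeParts.put blocks once the queue
// holds parallelDegree entries, throttling how many parts are in flight at a time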
int part;
while ((part = algorithm.getNextPart()) <= parts) {
Integer partKey = Integer.valueOf(part);
activeParts.put(partKey);
prepareUploadPart(container, key, uploadId, partKey, payload,
algorithm.getNextChunkOffset(), chunkSize, etags,
activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
}
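// the loop above exits with part holding the next part number, which labels the remainder part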
if (remaining > 0) {
Integer partKey = Integer.valueOf(part);
activeParts.put(partKey);
prepareUploadPart(container, key, uploadId, partKey, payload,
algorithm.getNextChunkOffset(), remaining, etags,
activeParts, futureParts, errors, maxRetries, errorMap, toRetry, latch);
}
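// wait for all initially scheduled parts to finish, successfully or not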
latch.await();
// retry failed parts in batches until the queue drains or the retry budget is exhausted
while (errors.get() <= maxRetries && toRetry.size() > 0) {
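// size each retry batch by the number of queued failures, capped at parallelDegree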
int atOnce = Math.min(Math.min(toRetry.size(), errors.get()), parallelDegree);
CountDownLatch retryLatch = new CountDownLatch(atOnce);
for (int i = 0; i < atOnce; i++) {
Part failedPart = toRetry.poll();
Integer partKey = Integer.valueOf(failedPart.getPart());
activeParts.put(partKey);
prepareUploadPart(container, key, uploadId, partKey, payload,
failedPart.getOffset(), failedPart.getSize(), etags,
activeParts, futureParts, errors, maxRetries, errorMap, toRetry, retryLatch);
}
retryLatch.await();
}
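// retry budget exhausted: fail the whole upload; the catch block below cancels and aborts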
if (errors.get() > maxRetries) {
throw new BlobRuntimeException(String.format(
"Too many failed parts (%s) during multipart upload of %s to container %s with uploadId %s",
errors.get(), key, container, uploadId));
}
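// S3 assembles the uploaded parts into a single object and returns its ETag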
String eTag = client.completeMultipartUpload(container, key, uploadId, etags);
logger.debug(String.format("multipart upload of %s to container %s with uploadId %s" +
" successfully finished with %s retries", key, container, uploadId, errors.get()));
return eTag;
} catch (Exception ex) {
RuntimeException rtex = Throwables2.getFirstThrowableOfType(ex, RuntimeException.class);
if (rtex == null) {
rtex = new RuntimeException(ex);
}
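// cancel parts that have not started yet; cancel(false) lets requests already in flight finish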
for (Map.Entry<Integer, ListenableFuture<String>> entry : futureParts.entrySet()) {
entry.getValue().cancel(false);
}
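// abort server-side, otherwise S3 retains (and bills for) the parts uploaded so far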
if (uploadId != null) {
client.abortMultipartUpload(container, key, uploadId);
}
throw rtex;
}
} else {
// Issue 936: don't just call putBlob, as that will see options=multiPart and