public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName,
            "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key,
            "The key parameter must be specified when uploading an object");

    final boolean skipContentMd5Check = skipContentMd5IntegrityCheck(putObjectRequest);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();
        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        final boolean calculateMD5 = metadata.getContentMD5() == null;
        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        if (calculateMD5 && !skipContentMd5Check) {
            try {
                String contentMd5_b64 = Md5Utils.md5AsBase64(file);
                metadata.setContentMD5(contentMd5_b64);
            } catch (Exception e) {
                throw new AmazonClientException(
                        "Unable to calculate MD5 hash: " + e.getMessage(), e);
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }
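
    // Marshal the call into an HTTP PUT against the target bucket/key, then
    // translate the optional request settings into request headers.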
    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }
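
    // Server-side encryption with a customer-provided key (SSE-C): the key
    // material, its algorithm, and an MD5 of the key travel as request headers.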
    // Populate the SSE-CPK parameters to the request header
    populateSseCpkRequestParameters(request, putObjectRequest.getSSECustomerKey());

    // Use internal interface to differentiate 0 from unset.
    final Long contentLength = (Long) metadata.getRawMetadataValue(Headers.CONTENT_LENGTH);
    if (contentLength == null) {
        /*
         * There's nothing we can do except let the HTTP client buffer the
         * input stream contents if the caller doesn't tell us how much data
         * to expect in a stream, since we have to explicitly tell Amazon S3
         * how much we're sending before we start sending any of it.
         */
        log.warn("No content length specified for stream data. " +
                 "Stream contents will be buffered in memory and could result in " +
                 "out of memory errors.");
    } else {
        final long expectedLength = contentLength.longValue();
        if (expectedLength >= 0) {
            // Performs length check on the underlying data stream. For the
            // S3 encryption client, the underlying data stream here refers
            // to the cipher-text data stream (i.e. not the underlying
            // plain-text data stream, which in turn may have been wrapped
            // with its own length-check input stream).
            @SuppressWarnings("resource")
            LengthCheckInputStream lcis = new LengthCheckInputStream(
                    input,
                    expectedLength, // expected data length to be uploaded
                    EXCLUDE_SKIPPED_BYTES);
            input = lcis;
        }
    }
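
    // The HTTP client may need to rewind and resend the stream on a retry, so
    // a stream without mark/reset support is wrapped in a buffered, repeatable
    // wrapper that can replay its contents.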
    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }
        input = new RepeatableInputStream(input, streamBufferSize);
    }
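
    // Unless the caller supplied a Content-MD5 up front (or the check is
    // disabled), compute a digest of the bytes as they stream out so it can
    // be validated against the ETag S3 returns.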
    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null && !skipContentMd5Check) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        input = md5DigestStream = new MD5DigestCalculatingInputStream(input);
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);
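
    // Fire progress events around the actual upload so any registered
    // listener can track the transfer's lifecycle.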
    final ProgressListener listener = putObjectRequest.getGeneralProgressListener();
    publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (AbortedException ignore) {
        } catch (Exception e) {
            log.debug("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }
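
    // For a plain single-part PUT, the ETag S3 returns is the hex encoding of
    // the payload's MD5, so it can be compared directly against the
    // client-side digest; cases where this doesn't hold are excluded by
    // skipContentMd5Check.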
    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    if (returnedMetadata != null && contentMd5 != null && !skipContentMd5Check) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
            throw new AmazonClientException("Unable to verify integrity of data upload. " +
                    "Client calculated content hash didn't match hash calculated by Amazon S3. " +
                    "You may need to delete the data stored in Amazon S3.");
        }
    }
    publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
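
    // Copy the interesting fields from the response metadata into the
    // client-facing result object.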
    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setSSEAlgorithm(returnedMetadata.getSSEAlgorithm());
    result.setSSECustomerAlgorithm(returnedMetadata.getSSECustomerAlgorithm());
    result.setSSECustomerKeyMd5(returnedMetadata.getSSECustomerKeyMd5());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setContentMd5(contentMd5);

    return result;
}
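
// Usage sketch (illustrative only; the bucket, key, and file names below are
// hypothetical, not part of this class). A typical caller supplies either a
// File, which lets this method auto-fill content length, content type, and
// MD5, or a stream plus explicit metadata:
//
//     AmazonS3 s3 = new AmazonS3Client();
//     PutObjectResult res = s3.putObject(
//             new PutObjectRequest("my-bucket", "my-key", new File("data.bin")));
//     System.out.println("ETag: " + res.getETag());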