            throws AmazonClientException, AmazonServiceException {
        assertParameterNotNull(putObjectRequest,
                "The PutObjectRequest parameter must be specified when uploading an object");

        String bucketName = putObjectRequest.getBucketName();
        String key = putObjectRequest.getKey();
        ObjectMetadata metadata = putObjectRequest.getMetadata();
        InputStream input = putObjectRequest.getInputStream();
        ProgressListener progressListener = putObjectRequest.getProgressListener();
        if (metadata == null) metadata = new ObjectMetadata();

        assertParameterNotNull(bucketName,
                "The bucket name parameter must be specified when uploading an object");
        assertParameterNotNull(key,
                "The key parameter must be specified when uploading an object");
        // If a file is specified for upload, we need to pull some additional
        // information from it to auto-configure a few options
        if (putObjectRequest.getFile() != null) {
            File file = putObjectRequest.getFile();

            // Always set the content length, even if it's already set
            metadata.setContentLength(file.length());

            // Only set the content type if it hasn't already been set
            if (metadata.getContentType() == null) {
                metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
            }
            FileInputStream fileInputStream = null;
            try {
                fileInputStream = new FileInputStream(file);
                byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
                metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
            } catch (Exception e) {
                throw new AmazonClientException(
                        "Unable to calculate MD5 hash: " + e.getMessage(), e);
            } finally {
                // Best-effort close; the stream is null if the file couldn't be opened.
                if (fileInputStream != null) {
                    try { fileInputStream.close(); } catch (Exception e) { }
                }
            }
            // Wrap the file in a repeatable stream so the payload can be
            // re-read if the request needs to be retried.
            try {
                input = new RepeatableFileInputStream(file);
            } catch (FileNotFoundException fnfe) {
                throw new AmazonClientException("Unable to find file to upload", fnfe);
            }
        }
        Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);
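
        // Populate the optional access control, storage class, and redirect headers.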
        if (putObjectRequest.getAccessControlList() != null) {
            addAclHeaders(request, putObjectRequest.getAccessControlList());
        } else if (putObjectRequest.getCannedAcl() != null) {
            request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
        }

        if (putObjectRequest.getStorageClass() != null) {
            request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
        }
        if (putObjectRequest.getRedirectLocation() != null) {
            request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
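            // A redirect can be set on an object with no content, so fall back
            // to an empty stream to give the request a payload.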
            if (input == null) {
                input = new ByteArrayInputStream(new byte[0]);
            }
        }
        // Use internal interface to differentiate 0 from unset.
        if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
            /*
             * If the caller doesn't tell us how much data to expect in a
             * stream, there's nothing we can do except let the HTTP client
             * buffer the input stream contents, since we have to explicitly
             * tell Amazon S3 how much we're sending before we start sending
             * any of it.
             */
            log.warn("No content length specified for stream data. " +
                    "Stream contents will be buffered in memory and could result in " +
                    "out of memory errors.");
        }
        if (progressListener != null) {
            input = new ProgressReportingInputStream(input, progressListener);
            fireProgressEvent(progressListener, ProgressEvent.STARTED_EVENT_CODE);
        }
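
        // Streams that don't support mark/reset can't be replayed if the
        // request is retried, so wrap them in a buffered, repeatable stream.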
        if (!input.markSupported()) {
            int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
            String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
            if (bufferSizeOverride != null) {
                try {
                    streamBufferSize = Integer.parseInt(bufferSizeOverride);
                } catch (NumberFormatException e) {
                    log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
                }
            }
            input = new RepeatableInputStream(input, streamBufferSize);
        }
        MD5DigestCalculatingInputStream md5DigestStream = null;
        if (metadata.getContentMD5() == null) {
            /*
             * If the user hasn't set the content MD5, then we don't want to
             * buffer the whole stream in memory just to calculate it. Instead,
             * we can calculate it on the fly and validate it with the returned
             * ETag from the object upload.
             */
            try {
                md5DigestStream = new MD5DigestCalculatingInputStream(input);
                input = md5DigestStream;
            } catch (NoSuchAlgorithmException e) {
                log.warn("No MD5 digest algorithm available. Unable to calculate " +
                        "checksum and verify data integrity.", e);
            }
        }
        if (metadata.getContentType() == null) {
            /*
             * Default to "application/octet-stream" if the user hasn't
             * specified a content type.
             */
            metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        }
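
        // Copy the object metadata into the request headers, attach the
        // payload, and execute the PUT.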
        populateRequestMetadata(request, metadata);
        request.setContent(input);

        ObjectMetadata returnedMetadata = null;
        try {
            returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
        } catch (AmazonClientException ace) {
            fireProgressEvent(progressListener, ProgressEvent.FAILED_EVENT_CODE);
            throw ace;
        } finally {
            try {
                input.close();
            } catch (Exception e) {
                log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
            }
        }
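
        // Prefer the MD5 computed on the fly over any user-supplied value, then
        // check it against the ETag returned by S3, which for a single PUT is
        // the hex-encoded MD5 of the payload.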
        String contentMd5 = metadata.getContentMD5();
        if (md5DigestStream != null) {
            contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
        }

        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListener, ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload. " +
                        "Client calculated content hash didn't match hash calculated by Amazon S3. " +
                        "You may need to delete the data stored in Amazon S3.");
            }
        }

        fireProgressEvent(progressListener, ProgressEvent.COMPLETED_EVENT_CODE);
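
        // Build the client-side result from the metadata in the response.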
        PutObjectResult result = new PutObjectResult();
        result.setETag(returnedMetadata.getETag());
        result.setVersionId(returnedMetadata.getVersionId());
        result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
        result.setExpirationTime(returnedMetadata.getExpirationTime());
        result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());

        return result;
    }