// Compute the file's MD5 for the Content-MD5 header, closing the stream afterwards.
FileInputStream fileInputStream = null;
try {
fileInputStream = new FileInputStream(file);
byte[] md5Hash = ServiceUtils.computeMD5Hash(fileInputStream);
metadata.setContentMD5(ServiceUtils.toBase64(md5Hash));
} catch (Exception e) {
throw new AmazonClientException(
"Unable to calculate MD5 hash: " + e.getMessage(), e);
} finally {
if (fileInputStream != null) {
try { fileInputStream.close(); } catch (Exception e) {}
}
}
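// Open the file as a repeatable stream so its contents can be re-read if the request is retried.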
try {
input = new RepeatableFileInputStream(file);
} catch (FileNotFoundException fnfe) {
throw new AmazonClientException("Unable to find file to upload", fnfe);
}
}
Request<Void> request = createRequest(bucketName, key, putObjectRequest);
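// Apply optional per-request settings as headers: a canned ACL and a storage class, if the caller supplied them.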
if (putObjectRequest.getCannedAcl() != null) {
request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
}
if (putObjectRequest.getStorageClass() != null) {
request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
}
if (metadata.getContentLength() <= 0) {
/*
* If the caller doesn't tell us how much data to expect in the
* stream, there's nothing we can do except let the HTTP client
* buffer the entire stream contents in memory, since we have to
* tell Amazon S3 explicitly how much data we're sending before we
* start sending any of it.
*/
log.warn("No content length specified for stream data. " +
"Stream contents will be buffered in memory and could result in " +
"out of memory errors.");
}
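// If the stream doesn't support mark/reset, wrap it so the data read so far can be buffered and replayed on a retry.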
if (!input.markSupported()) {
input = new RepeatableInputStream(input, Constants.DEFAULT_STREAM_BUFFER_SIZE);
}
if (metadata.getContentMD5() == null) {
/*
* If the user hasn't set the content MD5, we don't want to buffer
* the whole stream in memory just to calculate it. Instead, we
* calculate it on the fly and validate it against the ETag that
* Amazon S3 returns once the upload completes.
*/
try {
input = new MD5DigestCalculatingInputStream(input);
} catch (NoSuchAlgorithmException e) {
log.warn("No MD5 digest algorithm available. Unable to calculate " +
"checksum and verify data integrity.", e);
}
}
if (metadata.getContentType() == null) {
/*
* Default to "application/octet-stream" if the user hasn't
* specified a content type.
*/
metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
}
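// Copy the object metadata (content type, content MD5, user metadata, etc.) onto the request as HTTP headers.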
populateRequestMetadata(request, metadata);
signRequest(request, HttpMethodName.PUT, bucketName, key);
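// Convert the signed request into a low-level HTTP PUT and attach the stream as its body.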
HttpRequest httpRequest = convertToHttpRequest(request, HttpMethodName.PUT);
httpRequest.setContent(input);
ObjectMetadata returnedMetadata = null;
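// Execute the upload, making sure the input stream is closed whether or not the request succeeds.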
try {
S3MetadataResponseHandler responseHandler = new S3MetadataResponseHandler();
returnedMetadata = (ObjectMetadata)client.execute(httpRequest, responseHandler, errorResponseHandler);
} finally {
try {
input.close();
} catch (Exception e) {
log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
}
}
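// Verify upload integrity: prefer the caller-supplied Content-MD5, otherwise use the digest computed
// while streaming, and compare it with the returned ETag (the hex-encoded MD5 of the object for non-multipart uploads).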
String contentMd5 = metadata.getContentMD5();
if (input instanceof MD5DigestCalculatingInputStream) {
MD5DigestCalculatingInputStream md5DigestInputStream = (MD5DigestCalculatingInputStream)input;
contentMd5 = ServiceUtils.toBase64(md5DigestInputStream.getMd5Digest());
}
if (returnedMetadata != null && contentMd5 != null) {
byte[] clientSideHash = ServiceUtils.fromBase64(contentMd5);
byte[] serverSideHash = ServiceUtils.fromHex(returnedMetadata.getETag());
if (!Arrays.equals(clientSideHash, serverSideHash)) {
throw new AmazonClientException("Unable to verify integrity of data upload. " +
"Client calculated content hash didn't match hash calculated by Amazon S3. " +
"You may need to delete the data stored in Amazon S3.");
}
}