.getStringProperty("crypto.algorithm", "PBEWithMD5AndDES");
// NOTE(review): PBEWithMD5AndDES is a weak legacy cipher; it is only the
// fallback default here — confirm deployments override "crypto.algorithm".
encryptionUtil = new EncryptionUtil(cryptoPassword, algorithm, EncryptionUtil.DEFAULT_VERSION);
}
// Support for multipart uploads -- currently available for Amazon S3 only
MultipartUtils multipartUtils = null;
if (storageService instanceof S3Service) {
long maxUploadPartSize = properties.getLongProperty(
"upload.max-part-size", MultipartUtils.MAX_OBJECT_SIZE);
multipartUtils = new MultipartUtils(maxUploadPartSize);
}
// Repeat list and upload actions until all objects in bucket have been listed.
// NOTE(review): this outer do-loop is closed beyond the end of this fragment;
// presumably it repeats while result.priorLastKey indicates more listings remain.
do {
// Compare the local file set against the remote listing chunk starting at
// priorLastKey; the result carries both the discrepancies and the marker key
// for the next listing chunk.
ComparisonResult result =
compareLocalAndRemoteFiles(mergedDiscrepancyResults, bucket.getName(), rootObjectPath,
priorLastKey, objectKeyToFilepathMap, md5GenerationProgressWatcher);
priorLastKey = result.priorLastKey;
FileComparerResults discrepancyResults = result.discrepancyResults;
// Repeat upload actions until all local files have been uploaded (or we repeat listing loop)
// The iterator is deliberately created OUTSIDE the inner do-loop so each batch
// resumes where the previous one stopped rather than restarting from the first key.
Iterator<String> objectKeyIter = objectKeyToFilepathMap.keySet().iterator();
do {
List<LazyPreparedUploadObject> objectsToUpload = new ArrayList<LazyPreparedUploadObject>();
// Iterate through local files and perform the necessary action to synchronize them.
while (objectKeyIter.hasNext()) {
String relativeKeyPath = objectKeyIter.next();
// Build the full remote key by prefixing the root path, inserting a
// delimiter only when the root path doesn't already end with one.
String targetKey = relativeKeyPath;
if (rootObjectPath.length() > 0) {
if (rootObjectPath.endsWith(Constants.FILE_PATH_DELIM)) {
targetKey = rootObjectPath + targetKey;
} else {
targetKey = rootObjectPath + Constants.FILE_PATH_DELIM + targetKey;
}
}
if (isBatchMode) {
// Keys sorting beyond priorLastKey fall in a remote listing chunk we
// haven't fetched yet, so defer them to a later pass of the outer loop.
if (priorLastKey != null && targetKey.compareTo(priorLastKey) > 0) {
// We do not yet have the object listing to compare this file.
continue;
}
if (targetKey.compareTo(lastFileKeypathChecked) <= 0) {
// We have already handled this file in a prior batch.
continue;
} else {
// Record our high-water mark so later batches can skip this key.
lastFileKeypathChecked = targetKey;
}
}
File file = new File(objectKeyToFilepathMap.get(relativeKeyPath));
// Lookup and/or generate cached MD5 hash file for data file, if enabled
byte[] md5HashOfFile = null;
if (!file.isDirectory()) {
if (fileComparer.isGenerateMd5Files()) {
md5HashOfFile = fileComparer.generateFileMD5Hash(file, targetKey, null);
} else if (fileComparer.isUseMd5Files()) {
md5HashOfFile = fileComparer.lookupFileMD5Hash(file, targetKey);
}
}
// Classify the file against the comparison results and report the action
// using single-letter codes: N=new, U=updated locally, F=forced re-upload,
// -=already in sync, r/R=updated on the server (kept vs. replaced).
if (discrepancyResults.onlyOnClientKeys.contains(relativeKeyPath)) {
printOutputLine("N " + targetKey, REPORT_LEVEL_ACTIONS);
objectsToUpload.add(new LazyPreparedUploadObject(
targetKey, file, md5HashOfFile, aclString, encryptionUtil));
} else if (discrepancyResults.updatedOnClientKeys.contains(relativeKeyPath)) {
printOutputLine("U " + targetKey, REPORT_LEVEL_ACTIONS);
objectsToUpload.add(new LazyPreparedUploadObject(
targetKey, file, md5HashOfFile, aclString, encryptionUtil));
} else if (discrepancyResults.alreadySynchronisedKeys.contains(relativeKeyPath)
|| discrepancyResults.alreadySynchronisedLocalPaths.contains(relativeKeyPath))
{
if (isForce) {
printOutputLine("F " + targetKey, REPORT_LEVEL_ACTIONS);
objectsToUpload.add(new LazyPreparedUploadObject(
targetKey, file, md5HashOfFile, aclString, encryptionUtil));
} else {
printOutputLine("- " + targetKey, REPORT_LEVEL_ALL);
}
} else if (discrepancyResults.updatedOnServerKeys.contains(relativeKeyPath)) {
// This file has been updated on the server-side.
if (isKeepFiles) {
printOutputLine("r " + targetKey, REPORT_LEVEL_DIFFERENCES);
} else {
printOutputLine("R " + targetKey, REPORT_LEVEL_ACTIONS);
objectsToUpload.add(new LazyPreparedUploadObject(
targetKey, file, md5HashOfFile, aclString, encryptionUtil));
}
} else {
// Uh oh, program error here. The safest thing to do is abort!
throw new SynchronizeException("Invalid discrepancy comparison details for file "
+ file.getPath()
+ ". Sorry, this is a program error - aborting to keep your data safe");
}
// If we're batching, break out of upload preparation loop and
// actually upload files once we have our quota.
if (isBatchMode
&& objectsToUpload.size() >= Constants.DEFAULT_OBJECT_LIST_CHUNK_SIZE)
{
printOutputLine(
"Uploading batch of " + objectsToUpload.size() + " files",
REPORT_LEVEL_ACTIONS);
break;
}
}
// Break uploads into (smaller) batches if we are transforming files during upload
int uploadBatchSize = objectsToUpload.size();
if ((isEncryptionEnabled || isGzipEnabled)
&& properties.containsKey("upload.transformed-files-batch-size"))
{
// Limit uploads to small batches in batch mode -- based on the
// number of upload threads that are available.
uploadBatchSize = properties.getIntProperty("upload.transformed-files-batch-size", 1000);
}
// Upload New/Updated/Forced/Replaced objects.
// Drains objectsToUpload in chunks of uploadBatchSize; files are only
// materialized (gzip/encrypt transforms applied) at this point, via the
// lazy creator, to bound temp-file usage per batch.
while (doAction && objectsToUpload.size() > 0) {
List<StorageObject> objectsForStandardPut = new ArrayList<StorageObject>();
List<StorageObject> objectsForMultipartUpload = new ArrayList<StorageObject>();
// Invoke lazy upload object creator.
int maxBatchSize = Math.min(uploadBatchSize, objectsToUpload.size());
for (int i = 0; i < maxBatchSize; i++) {
LazyPreparedUploadObject lazyObj = objectsToUpload.remove(0);
StorageObject object = null;
try {
object = lazyObj.prepareUploadObject();
} catch (FileNotFoundException e) {
// A file may vanish or become unreadable between discovery and
// upload; optionally skip it instead of failing the whole sync.
if (skipMissingFiles) {
printOutputLine(
"WARNING: Skipping unreadable file: "
+ lazyObj.getFile().getAbsolutePath(),
REPORT_LEVEL_NONE);
continue;
} else {
throw e;
}
}
// Route oversized files through the S3 multipart path; multipartUtils
// is non-null only when the service is an S3Service (see above).
if (multipartUtils != null
&& multipartUtils.isFileLargerThanMaxPartSize(lazyObj.getFile()))
{
objectsForMultipartUpload.add(object);
} else {
objectsForStandardPut.add(object);
}
}
// Perform standard object uploads
if (objectsForStandardPut.size() > 0) {
(new ThreadedStorageService(storageService, serviceEventAdaptor)).putObjects(
bucket.getName(), objectsForStandardPut.toArray(new StorageObject[] {}));
serviceEventAdaptor.throwErrorIfPresent();
}
// Perform multipart uploads
// (S3Service cast is safe: objectsForMultipartUpload is only populated
// when multipartUtils != null, i.e. storageService instanceof S3Service.)
if (objectsForMultipartUpload.size() > 0) {
multipartUtils.uploadObjects(
bucket.getName(), (S3Service)storageService,
objectsForMultipartUpload, serviceEventAdaptor);
}
}
} while (objectKeyIter.hasNext()); // End of upload loop