Package: org.jets3t.service.utils

Examples of org.jets3t.service.utils.MultipartUtils


    }

    List<StorageObject> objectsToUploadAsMultipart =
        new ArrayList<StorageObject>();
    objectsToUploadAsMultipart.add(object);
    MultipartUtils mpUtils = new MultipartUtils(multipartBlockSize);

    try {
      mpUtils.uploadObjects(bucket.getName(), s3Service,
                            objectsToUploadAsMultipart, null);
    } catch (Exception e) {
      handleException(e, key);
    }
  }
View Full Code Here


        List<StorageObject> objectsToUploadAsMultipart = new ArrayList<StorageObject>();
        objectsToUploadAsMultipart.add(largeFileObject);

        long maxSizeForAPartInBytes = 20 * 1024 * 1024;
        MultipartUtils mpUtils = new MultipartUtils(maxSizeForAPartInBytes);

        mpUtils.uploadObjects(BUCKET_NAME, s3Service, objectsToUploadAsMultipart,
            null // eventListener : Provide one to monitor the upload progress
            );

        // The S3Service API also provides the underlying low-level multipart operations
        // if you need more control over the process. See the method names that
View Full Code Here

                .getStringProperty("crypto.algorithm", "PBEWithMD5AndDES");
            encryptionUtil = new EncryptionUtil(cryptoPassword, algorithm, EncryptionUtil.DEFAULT_VERSION);
        }

        // Support for multipart uploads -- currently available for Amazon S3 only
        MultipartUtils multipartUtils = null;
        if (storageService instanceof S3Service) {
            long maxUploadPartSize = properties.getLongProperty(
                "upload.max-part-size", MultipartUtils.MAX_OBJECT_SIZE);
            multipartUtils = new MultipartUtils(maxUploadPartSize);
        }

        // Repeat list and upload actions until all objects in bucket have been listed.
        do {
            ComparisonResult result =
                compareLocalAndRemoteFiles(mergedDiscrepancyResults, bucket.getName(), rootObjectPath,
                    priorLastKey, objectKeyToFilepathMap, md5GenerationProgressWatcher);
            priorLastKey = result.priorLastKey;
            FileComparerResults discrepancyResults = result.discrepancyResults;

            // Repeat upload actions until all local files have been uploaded (or we repeat listing loop)
            Iterator<String> objectKeyIter = objectKeyToFilepathMap.keySet().iterator();
            do {
                List<LazyPreparedUploadObject> objectsToUpload = new ArrayList<LazyPreparedUploadObject>();

                // Iterate through local files and perform the necessary action to synchronize them.
                while (objectKeyIter.hasNext()) {
                    String relativeKeyPath = objectKeyIter.next();

                    String targetKey = relativeKeyPath;
                    if (rootObjectPath.length() > 0) {
                        if (rootObjectPath.endsWith(Constants.FILE_PATH_DELIM)) {
                            targetKey = rootObjectPath + targetKey;
                        } else {
                            targetKey = rootObjectPath + Constants.FILE_PATH_DELIM + targetKey;
                        }
                    }

                    if (isBatchMode) {
                        if (priorLastKey != null && targetKey.compareTo(priorLastKey) > 0) {
                            // We do not yet have the object listing to compare this file.
                            continue;
                        }

                        if (targetKey.compareTo(lastFileKeypathChecked) <= 0) {
                            // We have already handled this file in a prior batch.
                            continue;
                        } else {
                            lastFileKeypathChecked = targetKey;
                        }
                    }

                    File file = new File(objectKeyToFilepathMap.get(relativeKeyPath));

                    // Lookup and/or generate cached MD5 hash file for data file, if enabled
                    byte[] md5HashOfFile = null;
                    if (!file.isDirectory()) {
                        if (fileComparer.isGenerateMd5Files()) {
                            md5HashOfFile = fileComparer.generateFileMD5Hash(file, targetKey, null);
                        } else if (fileComparer.isUseMd5Files()) {
                            md5HashOfFile = fileComparer.lookupFileMD5Hash(file, targetKey);
                        }
                    }

                    if (discrepancyResults.onlyOnClientKeys.contains(relativeKeyPath)) {
                        printOutputLine("N " + targetKey, REPORT_LEVEL_ACTIONS);
                        objectsToUpload.add(new LazyPreparedUploadObject(
                            targetKey, file, md5HashOfFile, aclString, encryptionUtil));
                    } else if (discrepancyResults.updatedOnClientKeys.contains(relativeKeyPath)) {
                        printOutputLine("U " + targetKey, REPORT_LEVEL_ACTIONS);
                        objectsToUpload.add(new LazyPreparedUploadObject(
                            targetKey, file, md5HashOfFile, aclString, encryptionUtil));
                    } else if (discrepancyResults.alreadySynchronisedKeys.contains(relativeKeyPath)
                               || discrepancyResults.alreadySynchronisedLocalPaths.contains(relativeKeyPath))
                    {
                        if (isForce) {
                            printOutputLine("F " + targetKey, REPORT_LEVEL_ACTIONS);
                            objectsToUpload.add(new LazyPreparedUploadObject(
                                targetKey, file, md5HashOfFile, aclString, encryptionUtil));
                        } else {
                            printOutputLine("- " + targetKey, REPORT_LEVEL_ALL);
                        }
                    } else if (discrepancyResults.updatedOnServerKeys.contains(relativeKeyPath)) {
                        // This file has been updated on the server-side.
                        if (isKeepFiles) {
                            printOutputLine("r " + targetKey, REPORT_LEVEL_DIFFERENCES);
                        } else {
                            printOutputLine("R " + targetKey, REPORT_LEVEL_ACTIONS);
                            objectsToUpload.add(new LazyPreparedUploadObject(
                                targetKey, file, md5HashOfFile, aclString, encryptionUtil));
                        }
                    } else {
                        // Uh oh, program error here. The safest thing to do is abort!
                        throw new SynchronizeException("Invalid discrepancy comparison details for file "
                            + file.getPath()
                            + ". Sorry, this is a program error - aborting to keep your data safe");
                    }

                    // If we're batching, break out of upload preparation loop and
                    // actually upload files once we have our quota.
                    if (isBatchMode
                        && objectsToUpload.size() >= Constants.DEFAULT_OBJECT_LIST_CHUNK_SIZE)
                    {
                        printOutputLine(
                            "Uploading batch of " + objectsToUpload.size() + " files",
                            REPORT_LEVEL_ACTIONS);
                        break;
                    }
                }

                // Break uploads into (smaller) batches if we are transforming files during upload
                int uploadBatchSize = objectsToUpload.size();
                if ((isEncryptionEnabled || isGzipEnabled)
                    && properties.containsKey("upload.transformed-files-batch-size"))
                {
                    // Limit uploads to small batches in batch mode -- based on the
                    // number of upload threads that are available.
                    uploadBatchSize = properties.getIntProperty("upload.transformed-files-batch-size", 1000);
                }

                // Upload New/Updated/Forced/Replaced objects.
                while (doAction && objectsToUpload.size() > 0) {
                    List<StorageObject> objectsForStandardPut = new ArrayList<StorageObject>();
                    List<StorageObject> objectsForMultipartUpload = new ArrayList<StorageObject>();

                    // Invoke lazy upload object creator.
                    int maxBatchSize = Math.min(uploadBatchSize, objectsToUpload.size());
                    for (int i = 0; i < maxBatchSize; i++) {
                        LazyPreparedUploadObject lazyObj = objectsToUpload.remove(0);
                        StorageObject object = null;

                        try {
                            object = lazyObj.prepareUploadObject();
                        } catch (FileNotFoundException e) {
                            if (skipMissingFiles) {
                                printOutputLine(
                                    "WARNING: Skipping unreadable file: "
                                    + lazyObj.getFile().getAbsolutePath(),
                                    REPORT_LEVEL_NONE);
                                continue;
                            } else {
                                throw e;
                            }
                        }

                        if (multipartUtils != null
                            && multipartUtils.isFileLargerThanMaxPartSize(lazyObj.getFile()))
                        {
                            objectsForMultipartUpload.add(object);
                        } else {
                            objectsForStandardPut.add(object);
                        }
                    }

                    // Perform standard object uploads
                    if (objectsForStandardPut.size() > 0) {
                        (new ThreadedStorageService(storageService, serviceEventAdaptor)).putObjects(
                            bucket.getName(), objectsForStandardPut.toArray(new StorageObject[] {}));
                        serviceEventAdaptor.throwErrorIfPresent();
                    }

                    // Perform multipart uploads
                    if (objectsForMultipartUpload.size() > 0) {
                        multipartUtils.uploadObjects(
                            bucket.getName(), (S3Service)storageService,
                            objectsForMultipartUpload, serviceEventAdaptor);
                    }
                }
            } while (objectKeyIter.hasNext()); // End of upload loop
View Full Code Here

        String bucketName = bucket.getName();

        try {
            // Ensure constructor enforces sanity constraints
            try {
                new MultipartUtils(MultipartUtils.MIN_PART_SIZE - 1);
                fail("Expected failure creating MultipartUtils with illegally small part size");
            } catch (IllegalArgumentException e) {}

            try {
                new MultipartUtils(MultipartUtils.MAX_OBJECT_SIZE + 1);
                fail("Expected failure creating MultipartUtils with illegally large part size");
            } catch (IllegalArgumentException e) {}

            // Default part size is maximum possible
            MultipartUtils multipartUtils = new MultipartUtils();
            assertEquals("Unexpected default part size",
                MultipartUtils.MAX_OBJECT_SIZE, multipartUtils.getMaxPartSize());

            // Create a util with the minimum part size, for quicker testing
            multipartUtils = new MultipartUtils(MultipartUtils.MIN_PART_SIZE);
            assertEquals("Unexpected default part size",
                MultipartUtils.MIN_PART_SIZE, multipartUtils.getMaxPartSize());

            // Create a large (11 MB) file
            File largeFile = File.createTempFile("JetS3t-testMultipartUtils-large", ".txt");
            BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(largeFile));
            int offset = 0;
            while (offset < 11 * 1024 * 1024) {
                bos.write((offset++ % 256));
            }
            bos.close();

            // Create a medium (6 MB) file
            File mediumFile = File.createTempFile("JetS3t-testMultipartUtils-medium", ".txt");
            bos = new BufferedOutputStream(new FileOutputStream(mediumFile));
            offset = 0;
            while (offset < 6 * 1024 * 1024) {
                bos.write((offset++ % 256));
            }
            bos.close();

            // Create a small (5 MB) file
            File smallFile = File.createTempFile("JetS3t-testMultipartUtils-small", ".txt");
            bos = new BufferedOutputStream(new FileOutputStream(smallFile));
            offset = 0;
            while (offset < 5 * 1024 * 1024) {
                bos.write((offset++ % 256));
            }
            bos.close();

            assertFalse("Expected small file to be <= 5MB",
                multipartUtils.isFileLargerThanMaxPartSize(smallFile));
            assertTrue("Expected medium file to be > 5MB",
                multipartUtils.isFileLargerThanMaxPartSize(mediumFile));
            assertTrue("Expected large file to be > 5MB",
                multipartUtils.isFileLargerThanMaxPartSize(largeFile));

            // Split small file into 5MB object parts
            List<S3Object> parts = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                smallFile.getName(), smallFile);
            assertEquals(1, parts.size());

            // Split medium file into 5MB object parts
            parts = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                mediumFile.getName(), mediumFile);
            assertEquals(2, parts.size());

            // Split large file into 5MB object parts
            parts = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                largeFile.getName(), largeFile);
            assertEquals(3, parts.size());

            /*
             * Upload medium-sized file as object in multiple parts
             */
            List<StorageObject> objects = new ArrayList<StorageObject>();
            objects.add(
                ObjectUtils.createObjectForUpload(
                    mediumFile.getName(),
                    mediumFile,
                    null, // encryptionUtil
                    false // gzipFile
                ));

            multipartUtils.uploadObjects(bucketName, service, objects, null);

            S3Object completedObject = (S3Object) service.getObjectDetails(
                bucketName, mediumFile.getName());
            assertEquals(mediumFile.length(), completedObject.getContentLength());
            // Confirm object's mimetype metadata was applied
            assertEquals("text/plain", completedObject.getContentType());

            /*
             * Upload large-sized file as object in multiple parts
             */
            objects = new ArrayList<StorageObject>();
            objects.add(
                ObjectUtils.createObjectForUpload(
                    largeFile.getName(),
                    largeFile,
                    null, // encryptionUtil
                    false // gzipFile
                ));

            multipartUtils.uploadObjects(bucketName, service, objects, null);

            completedObject = (S3Object) service.getObjectDetails(
                bucketName, largeFile.getName());
            assertEquals(largeFile.length(), completedObject.getContentLength());
        } finally {
View Full Code Here

        if (object.getDataInputFile() == null) {
            throw new ServiceException(
                "multipartUpload method only supports file-based objects");
        }

        MultipartUtils multipartUtils = new MultipartUtils(maxPartSize);

        // Upload object normally if it doesn't exceed maxPartSize
        if (!multipartUtils.isFileLargerThanMaxPartSize(object.getDataInputFile())) {
            log.debug("Performing normal PUT upload for object with data <= " + maxPartSize);
            putObject(bucketName, object);
        } else {
            log.debug("Performing multipart upload for object with data > " + maxPartSize);

            // Start upload
            MultipartUpload upload = multipartStartUpload(bucketName, object.getKey(),
                object.getMetadataMap(), object.getAcl(), object.getStorageClass());

            // Ensure upload is present on service-side, might take a little time
            boolean foundUpload = false;
            int maxTries = 5; // Allow up to 5 lookups for upload before we give up
            int tries = 0;
            do {
                try {
                    multipartListParts(upload);
                    foundUpload = true;
                } catch (S3ServiceException e) {
                    if ("NoSuchUpload".equals(e.getErrorCode())) {
                        tries++;
                        try {
                            Thread.sleep(1000); // Wait for a second
                        } catch (InterruptedException ie) {
                            tries = maxTries;
                        }
                    } else {
                        // Bail out if we get a (relatively) unexpected exception
                        throw e;
                    }
                }
            } while (!foundUpload && tries < maxTries);

            if (!foundUpload) {
                throw new ServiceException(
                    "Multipart upload was started but unavailable for use after "
                    + tries + " attempts, giving up");
            }

            // Will attempt to delete multipart upload upon failure.
            try {
                List<S3Object> partObjects = multipartUtils.splitFileIntoObjectsByMaxPartSize(
                    object.getKey(), object.getDataInputFile());

                List<MultipartPart> parts = new ArrayList<MultipartPart>();
                int partNumber = 1;
                for (S3Object partObject: partObjects) {
View Full Code Here

    }

    List<StorageObject> objectsToUploadAsMultipart =
        new ArrayList<StorageObject>();
    objectsToUploadAsMultipart.add(object);
    MultipartUtils mpUtils = new MultipartUtils(multipartBlockSize);

    try {
      mpUtils.uploadObjects(bucket.getName(), s3Service,
                            objectsToUploadAsMultipart, null);
    } catch (ServiceException e) {
      handleServiceException(e);
    } catch (Exception e) {
      throw new S3Exception(e);
View Full Code Here

        Preconditions.checkNotNull(bucket, "bucket is needed");
    }

    protected void initialize() {
        if (mpUtils == null) { // not injected
            mpUtils = new MultipartUtils(maxPartSize);
        }
        try {
            Jets3tProperties properties = new Jets3tProperties();
            properties.setProperty("s3service.s3-endpoint", s3Endpoint);
            if (credentialsProvider.getCredentials() instanceof AWSSessionCredentials) {
View Full Code Here

                                // No-op: this stub deliberately does nothing on refresh,
                                // presumably because the mocked test setup never needs to
                                // renew credentials — NOTE(review): confirm against the
                                // real provider's refresh() contract.
                                @Override
                                public void refresh() {
                                }
                            });

                        MultipartUtils mpUtils = mock(MultipartUtils.class);
                        try {
                            doAnswer(new Answer() {
                                @Override
                                public Object answer(InvocationOnMock invocation) throws Throwable {
                                    Thread.sleep(1000);
View Full Code Here

TOP

Related Classes of org.jets3t.service.utils.MultipartUtils

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.