Package: org.jets3t.service.model

Examples of org.jets3t.service.model.MultipartUpload


  public void copyLargeFile(S3Object srcObject, String dstKey) throws IOException {
    try {
      long partCount = srcObject.getContentLength() / multipartCopyBlockSize +
          (srcObject.getContentLength() % multipartCopyBlockSize > 0 ? 1 : 0);

      MultipartUpload multipartUpload = s3Service.multipartStartUpload
          (bucket.getName(), dstKey, srcObject.getMetadataMap());

      List<MultipartPart> listedParts = new ArrayList<MultipartPart>();
      for (int i = 0; i < partCount; i++) {
        long byteRangeStart = i * multipartCopyBlockSize;
 
View Full Code Here


        }

        public MultipartUpload getMultipartUpload() {
            if (initiatedDate != null) {
                // Return the contents from a ListMultipartUploadsResult response
                return new MultipartUpload(uploadId, objectKey, storageClass,
                    initiator, owner, initiatedDate);
            } else {
                // Return the contents from an InitiateMultipartUploadsResult response
                return new MultipartUpload(uploadId, bucketName, objectKey);
            }
        }
View Full Code Here

             */
            String objectKeyForThreaded = "threaded-multipart-object.txt";
            Map<String, Object> metadataForThreaded = new HashMap<String, Object>();

            // Start threaded upload using normal service.
            MultipartUpload threadedMultipartUpload =
                service.multipartStartUpload(bucketName, objectKeyForThreaded, metadataForThreaded);

            // Create 5MB of test data
            byte[] fiveMBTestData = new byte[fiveMB];
            for (int offset = 0; offset < fiveMBTestData.length; offset++) {
                fiveMBTestData[offset] = (byte) (offset % 256);
            }

            // Prepare objects for upload (2 * 5MB, and 1 * 1 byte)
            S3Object[] objectsForThreadedUpload = new S3Object[] {
                new S3Object(threadedMultipartUpload.getObjectKey(), fiveMBTestData),
                new S3Object(threadedMultipartUpload.getObjectKey(), fiveMBTestData),
                new S3Object(threadedMultipartUpload.getObjectKey(), new byte[] {fiveMBTestData[0]}),
            };

            // Create threaded service and perform upload in multiple threads
            ThreadedS3Service threadedS3Service = new ThreadedS3Service(service,
                new S3ServiceEventAdaptor());
            List<MultipartUploadAndParts> uploadAndParts = new ArrayList<MultipartUploadAndParts>();
            uploadAndParts.add(new MultipartUploadAndParts(
                threadedMultipartUpload, Arrays.asList(objectsForThreadedUpload)));
            threadedS3Service.multipartUploadParts(uploadAndParts);

            // Complete threaded multipart upload using automatic part listing and normal service.
            MultipartCompleted threadedMultipartCompleted = service.multipartCompleteUpload(
                threadedMultipartUpload);

            // Confirm completed object exists and has expected size
            S3Object finalObjectForThreaded = (S3Object) service.getObjectDetails(
                bucketName, threadedMultipartUpload.getObjectKey());
            assertEquals(fiveMB * 2 + 1, finalObjectForThreaded.getContentLength());
        } finally {
            cleanupBucketForTest("testMultipartUploadWithConvenienceMethod");
        }
    }
View Full Code Here

            Map<String, Object> metadata = new HashMap<String, Object>();
            metadata.put("test-md-value", "testing, testing, 123");
            metadata.put("test-timestamp-value", System.currentTimeMillis());

            // Start a multipart upload
            MultipartUpload testMultipartUpload = service.multipartStartUpload(
                bucketName, objectKey, metadata,
                AccessControlList.REST_CANNED_AUTHENTICATED_READ, null);

            assertEquals(bucketName, testMultipartUpload.getBucketName());
            assertEquals(objectKey, testMultipartUpload.getObjectKey());

            // List all ongoing multipart uploads
            List<MultipartUpload> uploads = service.multipartListUploads(bucketName);

            assertTrue("Expected at least one ongoing upload", uploads.size() >= 1);

            // Confirm our newly-created multipart upload is present in listing
            boolean foundNewUpload = false;
            for (MultipartUpload upload: uploads) {
                if (upload.getUploadId().equals(testMultipartUpload.getUploadId())) {
                    foundNewUpload = true;
                }
            }
            assertTrue("Expected to find the new upload in listing", foundNewUpload);

            // Start a second, encrypted multipart upload
            S3Object encryptedMultiPartObject = new S3Object(objectKey + "2");
            encryptedMultiPartObject.setServerSideEncryptionAlgorithm(
                S3Object.SERVER_SIDE_ENCRYPTION__AES256);
            MultipartUpload testMultipartUpload2 =
                service.multipartStartUpload(bucketName, encryptedMultiPartObject);
            assertEquals("AES256",
                testMultipartUpload2.getMetadata().get("x-amz-server-side-encryption"));

            // List multipart uploads with markers -- Find second upload only
            uploads = service.multipartListUploads(bucketName,
                "multipart-object.txt",
                testMultipartUpload.getUploadId(),
                10);
            assertEquals(1, uploads.size());
            assertEquals(objectKey + "2", uploads.get(0).getObjectKey());

            // List multipart uploads with prefix/delimiter constraints
            MultipartUpload testMultipartUpload3 =
                service.multipartStartUpload(bucketName, objectKey + "/delimited", metadata);

            MultipartUploadChunk chunk = service.multipartListUploadsChunked(bucketName,
                "multipart-object", // prefix
                null, // delimiter
View Full Code Here

            putObject(bucketName, object);
        } else {
            log.debug("Performing multipart upload for object with data > " + maxPartSize);

            // Start upload
            MultipartUpload upload = multipartStartUpload(bucketName, object.getKey(),
                object.getMetadataMap(), object.getAcl(), object.getStorageClass());

            // Ensure upload is present on service-side, might take a little time
            boolean foundUpload = false;
            int maxTries = 5; // Allow up to 5 lookups for upload before we give up
View Full Code Here

        prepareRESTHeaderAcl(metadata, acl);

        try {
            HttpResponse httpResponse = performRestPost(
                bucketName, objectKey, metadata, requestParameters, null, false);
            MultipartUpload multipartUpload = getXmlResponseSaxParser()
                .parseInitiateMultipartUploadResult(
                    new HttpMethodReleaseInputStream(httpResponse));
            multipartUpload.setMetadata(metadata); // Add object's known metadata to result object.
            return multipartUpload;
        } catch (ServiceException se) {
            throw new S3ServiceException(se);
        }
    }
View Full Code Here

  public void copyLargeFile(S3Object srcObject, String dstKey) throws IOException {
    try {
      long partCount = srcObject.getContentLength() / multipartCopyBlockSize +
          (srcObject.getContentLength() % multipartCopyBlockSize > 0 ? 1 : 0);

      MultipartUpload multipartUpload = s3Service.multipartStartUpload
          (bucket.getName(), dstKey, srcObject.getMetadataMap());

      List<MultipartPart> listedParts = new ArrayList<MultipartPart>();
      for (int i = 0; i < partCount; i++) {
        long byteRangeStart = i * multipartCopyBlockSize;
 
View Full Code Here

TOP

Related Classes of org.jets3t.service.model.MultipartUpload

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact: software#gmail.com.