Package org.jets3t.service.model

Examples of org.jets3t.service.model.S3Object


        this.currentObjectIndex = 0;
        displayObjectProperties();
    }
   
    private void displayObjectProperties() {
        S3Object object = objects[currentObjectIndex];
       
        // Manage previous/next buttons.
        if (objects.length > 1) {
            nextPreviousPanel.setVisible(true);
            currentObjectLabel.setText((currentObjectIndex + 1) + " of " + objects.length);
            previousObjectButton.setEnabled(currentObjectIndex > 0);
            nextObjectButton.setEnabled(currentObjectIndex < (objects.length -1));
        } else {
            nextPreviousPanel.setVisible(false);           
        }
       
        objectKeyTextField.setText(object.getKey());
        objectContentTypeTextField.setText(object.getContentType());
        objectContentLengthTextField.setText(String.valueOf(object.getContentLength()));
        objectLastModifiedTextField.setText(String.valueOf(object.getLastModifiedDate()));
        objectETagTextField.setText(object.getETag());
        bucketNameTextField.setText(object.getBucketName());

        if (object.getOwner() != null) {
            ownerNameLabel.setVisible(true);
            ownerNameTextField.setVisible(true);
            ownerIdLabel.setVisible(true);
            ownerIdTextField.setVisible(true);           
            ownerNameTextField.setText(object.getOwner().getDisplayName());
            ownerIdTextField.setText(object.getOwner().getId());
        } else {
            ownerNameLabel.setVisible(false);
            ownerNameTextField.setVisible(false);
            ownerIdLabel.setVisible(false);
            ownerIdTextField.setVisible(false);           
        }
       
        // Clear old table contents
        while (objectMetadataTableModel.getRowCount() > 0) {
            objectMetadataTableModel.removeRow(0);
        }       

        // Remove the metadata items already displayed, or not suitable for gui display.
        Map objectMetadata = new HashMap(object.getMetadataMap());
        objectMetadata.remove(S3Object.METADATA_HEADER_CONTENT_LENGTH);
        objectMetadata.remove(S3Object.METADATA_HEADER_CONTENT_TYPE);
        objectMetadata.remove(S3Object.METADATA_HEADER_DATE);
        objectMetadata.remove(S3Object.METADATA_HEADER_ETAG);
        objectMetadata.remove(S3Object.METADATA_HEADER_LAST_MODIFIED_DATE);
View Full Code Here
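The snippet above is cut off just before the remaining metadata items are shown. A minimal sketch of that next step, assuming objectMetadataTableModel is a two-column (name/value) DefaultTableModel as the row-clearing loop above suggests, might look like this:

    // Sketch only: add each remaining metadata item to the (assumed) two-column
    // name/value table model.
    Iterator metadataIter = objectMetadata.entrySet().iterator();
    while (metadataIter.hasNext()) {
        Map.Entry metadataEntry = (Map.Entry) metadataIter.next();
        objectMetadataTableModel.addRow(new Object[] {
            metadataEntry.getKey(), metadataEntry.getValue()});
    }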


        }

        public void run() {           
            BufferedInputStream bufferedInputStream = null;
            BufferedOutputStream bufferedOutputStream = null;
            S3Object object = null;

            try {
                if (!downloadPackage.isSignedDownload()) {
                    object = s3Service.getObject(bucket, objectKey);
                } else {
                    SignedUrlHandler handler = (SignedUrlHandler) s3Service;
                    object = handler.getObjectWithSignedUrl(downloadPackage.getSignedUrl());
                }

                // Set up monitoring of stream bytes transferred.
                interruptableInputStream = new InterruptableInputStream(object.getDataInputStream());
                bufferedInputStream = new BufferedInputStream(
                    new ProgressMonitoredInputStream(interruptableInputStream, progressMonitor));
               
                bufferedOutputStream = new BufferedOutputStream(
                    downloadPackage.getOutputStream());

                try {
                    byte[] buffer = new byte[1024];
                    int byteCount = -1;
   
                    while ((byteCount = bufferedInputStream.read(buffer)) != -1) {
                        bufferedOutputStream.write(buffer, 0, byteCount);
                    }
                } finally {
                    if (bufferedOutputStream != null) {
                        bufferedOutputStream.close();                       
                    }
                    if (bufferedInputStream != null) {
                        bufferedInputStream.close();                   
                    }
                }               

                object.setDataInputStream(null);
                object.setDataInputFile(downloadPackage.getDataFile());
               
                // If data was downloaded to a file, set the file's Last Modified date
                // to the original last modified date metadata stored with the object.               
                if (restoreLastModifiedDate && downloadPackage.getDataFile() != null) {
                    String metadataLocalFileDate = (String) object.getMetadata(
                        Constants.METADATA_JETS3T_LOCAL_FILE_DATE);
                   
                    // Try to retrieve the original date using the deprecated metadata name.
                    if (metadataLocalFileDate == null) {
                        metadataLocalFileDate = (String) object.getMetadata(
                            Constants.METADATA_JETS3T_LOCAL_FILE_DATE_DEPRECATED);
                    }
                   
                    if (metadataLocalFileDate != null) {
                        log.debug("Restoring original Last Modified date for object '"
                            + object.getKey() + "' to file '" + downloadPackage.getDataFile()
                            + "': " + metadataLocalFileDate);
                        downloadPackage.getDataFile().setLastModified(
                            ServiceUtils.parseIso8601Date(metadataLocalFileDate).getTime());
                    }
                }
View Full Code Here
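The downloadPackage consumed by the run() method above is constructed elsewhere; a minimal sketch using the DownloadPackage(S3Object, File) constructor that appears later on this page (the key and file name are illustrative):

    // Sketch only: associate the object to download with a local target file,
    // as the run() method above expects.
    S3Object objectToDownload = new S3Object("documents/report.txt");
    DownloadPackage downloadPackage = new DownloadPackage(
        objectToDownload, new File("report.txt"));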

        // Create a bucket to test reading and writing to       
        S3Bucket bucket = new S3Bucket(myBucketName);
       
        // Create an object to use for testing.
        S3Object object = new S3Object(bucket, "urlSigningTestObject.txt", "Hello World!");
       
        // Determine what the time will be in 5 minutes - our signed URLs will be valid for 5 minutes only.
        Calendar cal = Calendar.getInstance();
        cal.add(Calendar.MINUTE, 5);
        Date expiryDate = cal.getTime();

        /*
         * Generate the signed URL strings for PUT, GET, HEAD and DELETE operations, using the
         * AWS Credentials in the samples.properties file.
         */
        AWSCredentials awsCredentials = SamplesUtils.loadAWSCredentials();
       
        // Create a signed HTTP PUT URL valid for 5 minutes.       
        String putUrl = S3Service.createSignedPutUrl(bucket.getName(), object.getKey(),
            object.getMetadataMap(), awsCredentials, expiryDate, false);
       
        // Create a signed HTTP GET URL valid for 5 minutes.
        String getUrl = S3Service.createSignedGetUrl(bucket.getName(), object.getKey(),
            awsCredentials, expiryDate, false);

        // Create a signed HTTP HEAD URL valid for 5 minutes.
        String headUrl = S3Service.createSignedHeadUrl(bucket.getName(), object.getKey(),
            awsCredentials, expiryDate, false);

        // Create a signed HTTP DELETE URL valid for 5 minutes.
        String deleteUrl = S3Service.createSignedDeleteUrl(bucket.getName(), object.getKey(),
            awsCredentials, expiryDate, false);
       
        System.out.println("Signed PUT URL: " + putUrl);
        System.out.println("Signed GET URL: " + getUrl);
        System.out.println("Signed HEAD URL: " + headUrl);
        System.out.println("Signed DELETE URL: " + deleteUrl);

        System.out.println("Performing PUT with signed URL");
        S3Object putObject = signedUrlHandler.putObjectWithSignedUrl(putUrl, object);
        System.out.println("  Object has been uploaded to S3: " + putObject.getKey());

        System.out.println("Performing HEAD with signed URL");       
        S3Object headObject = signedUrlHandler.getObjectDetailsWithSignedUrl(headUrl);
        System.out.println("  Size of object in S3: " + headObject.getContentLength());

        System.out.println("Performing GET with signed URL");
        S3Object getObject = signedUrlHandler.getObjectWithSignedUrl(getUrl);
        String contentData = (new BufferedReader(
            new InputStreamReader(getObject.getDataInputStream()))).readLine();
        System.out.println("  Content of object in S3: " + contentData);
       
        System.out.println("Performing DELETE with signed URL");
        signedUrlHandler.deleteObjectWithSignedUrl(deleteUrl);
        System.out.println("  Object deleted - the example is finished");
View Full Code Here
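The snippet above uses a signedUrlHandler that is created before the excerpt begins. A minimal setup sketch, assuming the REST implementation is used (the same cast to SignedUrlHandler appears in the download example earlier on this page):

    // Sketch only: the REST/HTTP implementation doubles as the SignedUrlHandler
    // used for the signed PUT/GET/HEAD/DELETE calls above.
    S3Service s3Service = new RestS3Service(awsCredentials);
    SignedUrlHandler signedUrlHandler = (SignedUrlHandler) s3Service;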

     * exceptions could include IO failures, gzipping and encryption failures.
     */
    public static S3Object createObjectForUpload(String objectKey, File dataFile,
        EncryptionUtil encryptionUtil, boolean gzipFile, BytesProgressWatcher progressWatcher) throws Exception
    {
        S3Object s3Object = new S3Object(objectKey);
       
        // Set object explicitly to private access by default.
        s3Object.setAcl(AccessControlList.REST_CANNED_PRIVATE);
       
        s3Object.addMetadata(Constants.METADATA_JETS3T_LOCAL_FILE_DATE,
            ServiceUtils.formatIso8601Date(new Date(dataFile.lastModified())));

        if (dataFile.isDirectory()) {
            s3Object.setContentLength(0);
            s3Object.setContentType(Mimetypes.MIMETYPE_JETS3T_DIRECTORY);
        } else {
            s3Object.setContentType(Mimetypes.getInstance().getMimetype(dataFile));
            File uploadFile = transformUploadFile(dataFile, s3Object, encryptionUtil,
                gzipFile, progressWatcher);
            s3Object.setContentLength(uploadFile.length());
            s3Object.setDataInputFile(uploadFile);
           
            // Compute the upload file's MD5 hash.
            InputStream inputStream = new BufferedInputStream(new FileInputStream(uploadFile));
            if (progressWatcher != null) {
                inputStream = new ProgressMonitoredInputStream(inputStream, progressWatcher);
            }
            s3Object.setMd5Hash(ServiceUtils.computeMD5Hash(inputStream));
           
            if (!uploadFile.equals(dataFile)) {
                // Compute the MD5 hash of the *original* file, if upload file has been altered
                // through encryption or gzipping.
                inputStream = new BufferedInputStream(new FileInputStream(dataFile));
                if (progressWatcher != null) {
                    inputStream = new ProgressMonitoredInputStream(inputStream, progressWatcher);
                }               
               
                s3Object.addMetadata(
                    S3Object.METADATA_HEADER_ORIGINAL_HASH_MD5,
                    ServiceUtils.toBase64(ServiceUtils.computeMD5Hash(inputStream)));
            }           
        }
        return s3Object;
View Full Code Here
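A hedged usage sketch for the helper above; the file, key, service, and bucket names are illustrative, and it assumes (as the surrounding code suggests) that a null EncryptionUtil with gzipFile set to false leaves the upload file untransformed:

    // Hypothetical usage of createObjectForUpload: no encryption, no gzip,
    // no progress reporting.
    File reportFile = new File("report.txt");
    S3Object uploadObject = createObjectForUpload(
        "backups/report.txt", reportFile, null, false, null);
    s3Service.putObject(bucket, uploadObject);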

       
        // Create the target bucket
        bucket = s3Service.createBucket(bucketName);

        // Upload credentials object, which must be publicly readable.
        S3Object credsObject = new S3Object(credentialObjectName);       
        credsObject.setDataInputStream(bais);
        credsObject.setAcl(AccessControlList.REST_CANNED_PUBLIC_READ);
        s3Service.putObject(bucket, credsObject);

        /*
         * Retrieve credentials.
         */
       
        // Initialise an S3 Service that does not know the AWS credentials.
        s3Service = new RestS3Service(null);
       
        // Check whether the passphrase-based bucket exists and is accessible.
        System.out.println("Is bucket accessible? " + s3Service.isBucketAccessible(bucketName));
       
        // Download the encrypted credentials object.
        S3Object retrievedCredsObject = s3Service.getObject(bucket, credentialObjectName);
       
        // Decrypt the credentials object.
        AWSCredentials retrievedCreds = AWSCredentials.load(password,
            new BufferedInputStream(retrievedCredsObject.getDataInputStream()));
       
        System.out.println("Retrieved credentials from S3: "
            + retrievedCreds.getAccessKey() + " : " + retrievedCreds.getSecretKey());
    }
View Full Code Here
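The bais stream uploaded at the top of the snippet is prepared beforehand. A likely sketch, assuming AWSCredentials offers a stream-based save() to mirror the stream-based load() used above; if your version does not, saving to a temporary file works the same way:

    // Sketch (assumed API): encrypt the credentials into memory so they can be
    // uploaded as the password-protected, publicly readable object above.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    awsCredentials.save(password, baos);
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());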

        // Check files on server against local client files.
        Iterator s3ObjectsMapIter = s3ObjectsMap.entrySet().iterator();
        while (s3ObjectsMapIter.hasNext()) {
            Map.Entry entry = (Map.Entry) s3ObjectsMapIter.next();
            String keyPath = (String) entry.getKey();
            S3Object s3Object = (S3Object) entry.getValue();

            // A special-case check to identify objects created by Panic's
            // Transmit application that serve as directory placeholders -
            // a similar concept to the placeholders JetS3t uses but sadly
            // these look different.
            if (keyPath.endsWith("/")
                && s3Object.getContentLength() == 0
                && "binary/octet-stream".equals(s3Object.getContentType()))
            {
                boolean ignorePanicDirPlaceholders =
                    Jets3tProperties.getInstance(Constants.JETS3T_PROPERTIES_FILENAME)
                    .getBoolProperty("filecomparer.ignore-panic-dir-placeholders", false);
               
                if (ignorePanicDirPlaceholders) {                               
                    log.debug("Ignoring object that looks like a directory " +
                        "placeholder created by Panic's Transmit application: " + keyPath);
                    alreadySynchronisedKeys.add(keyPath);               
                    continue;
                } else {
                    log.warn("Identified an object that looks like a directory " +
                        "placeholder created by Panic's Transmit application. " +
                        "If this object was indeed created by Transmit, it will not " +
                        "be handled properly unless the JetS3t property " +
                        "\"filecomparer.ignore-panic-dir-placeholders\" is set to " +
                        "true. " + s3Object);                   
                }
            }

            // Check whether local file is already on server
            if (filesMap.containsKey(keyPath)) {                               
                // File has been backed up in the past, is it still up-to-date?
                File file = (File) filesMap.get(keyPath);

                if (file.isDirectory()) {
                    // We don't care about directory date changes, as long as it's present.
                    alreadySynchronisedKeys.add(keyPath);
                } else {
                    // Compare file hashes.
                    boolean useMd5Files =
                        Jets3tProperties.getInstance(Constants.JETS3T_PROPERTIES_FILENAME)
                        .getBoolProperty("filecomparer.use-md5-files", false);

                    boolean generateMd5Files =
                        Jets3tProperties.getInstance(Constants.JETS3T_PROPERTIES_FILENAME)
                        .getBoolProperty("filecomparer.generate-md5-files", false);                                       
                   
                    byte[] computedHash = null;
                   
                    // Check whether a pre-computed MD5 hash file is available
                    File computedHashFile = new File(file.getPath() + ".md5");
                    if (useMd5Files
                        && computedHashFile.canRead()
                        && computedHashFile.lastModified() > file.lastModified())
                    {
                        try {
                            // A pre-computed MD5 hash file is available, try to read this hash value
                            BufferedReader br = new BufferedReader(new FileReader(computedHashFile));
                            computedHash = ServiceUtils.fromHex(br.readLine().split("\\s")[0]);
                            br.close();
                        } catch (Exception e) {
                            log.warn("Unable to read hash from computed MD5 file", e);
                        }
                    }
                   
                    if (computedHash == null) {
                        // A pre-computed hash file was not available, or could not be read.
                        // Calculate the hash value anew.
                        InputStream hashInputStream = null;
                        if (progressWatcher != null) {
                            hashInputStream = new ProgressMonitoredInputStream( // Report on MD5 hash progress.
                                new FileInputStream(file), progressWatcher);
                        } else {
                            hashInputStream = new FileInputStream(file);
                        }
                        computedHash = ServiceUtils.computeMD5Hash(hashInputStream);
                    }
                                                                               
                    String fileHashAsBase64 = ServiceUtils.toBase64(computedHash);
                   
                    if (generateMd5Files && !file.getName().endsWith(".md5") &&
                        (!computedHashFile.exists()
                        || computedHashFile.lastModified() < file.lastModified()))
                    {
                        // Create or update a pre-computed MD5 hash file.
                        try {
                            FileWriter fw = new FileWriter(computedHashFile);                           
                            fw.write(ServiceUtils.toHex(computedHash));
                            fw.close();
                        } catch (Exception e) {
                            log.warn("Unable to write computed MD5 hash to a file", e);
                        }
                    }
                   
                    // Get the S3 object's Base64 hash.
                    String objectHash = null;
                    if (s3Object.containsMetadata(S3Object.METADATA_HEADER_ORIGINAL_HASH_MD5)) {
                        // Use the object's *original* hash, as it is an encoded version of a local file.
                        objectHash = (String) s3Object.getMetadata(
                            S3Object.METADATA_HEADER_ORIGINAL_HASH_MD5);
                        log.debug("Object in S3 is encoded, using the object's original hash value for: "
                            + s3Object.getKey());
                    } else {
                        // The object wasn't altered when uploaded, so use its current hash.
                        objectHash = s3Object.getMd5HashAsBase64();
                    }
                   
                    if (fileHashAsBase64.equals(objectHash)) {
                        // Hashes match so file is already synchronised.
                        alreadySynchronisedKeys.add(keyPath);
                    } else {
                        // File is out-of-synch. Check which version has the latest date.
                        Date s3ObjectLastModified = null;
                        String metadataLocalFileDate = (String) s3Object.getMetadata(
                            Constants.METADATA_JETS3T_LOCAL_FILE_DATE);
                       
                        // Try to retrieve the date using the deprecated metadata name
                        if (metadataLocalFileDate == null) {
                            metadataLocalFileDate = (String) s3Object.getMetadata(
                                Constants.METADATA_JETS3T_LOCAL_FILE_DATE_DEPRECATED);
                        }
                       
                        if (metadataLocalFileDate == null) {
                            // This is risky as local file times and S3 times don't match!
                            log.warn("Using S3 last modified date as file date. This is not reliable "
                                + "as the time according to S3 can differ from your local system time. "
                                + "Please use the metadata item "
                                + Constants.METADATA_JETS3T_LOCAL_FILE_DATE);
                            s3ObjectLastModified = s3Object.getLastModifiedDate();
                        } else {
                            s3ObjectLastModified = ServiceUtils
                                .parseIso8601Date(metadataLocalFileDate);
                        }
                        if (s3ObjectLastModified.getTime() > file.lastModified()) {
                            updatedOnServerKeys.add(keyPath);
                        } else if (s3ObjectLastModified.getTime() < file.lastModified()) {
                            updatedOnClientKeys.add(keyPath);
                        } else {
                            // Dates match exactly but the hash doesn't. Shouldn't ever happen!
                            throw new IOException("Backed-up S3Object " + s3Object.getKey()
                                + " and local file " + file.getName()
                                + " have the same date but different hash values. "
                                + "This shouldn't happen!");
                        }
                    }
View Full Code Here
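A small sketch of pre-generating the .md5 sidecar files consumed above; the format assumed is a hex-encoded MD5 hash as the first whitespace-delimited token, which matches the parsing code in the snippet:

    // Sketch only: write a pre-computed MD5 sidecar file in the format the
    // comparer reads above (hex hash, then the file name).
    byte[] hash = ServiceUtils.computeMD5Hash(
        new BufferedInputStream(new FileInputStream(file)));
    FileWriter md5Writer = new FileWriter(new File(file.getPath() + ".md5"));
    md5Writer.write(ServiceUtils.toHex(hash) + "  " + file.getName());
    md5Writer.close();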

        // uploading it to S3. These print-outs demonstrate that the S3Object returned by the
        // putObject method contains extra information provided by S3, such as the date the
        // object was last modified on an S3 server.

        // Create an empty object with a key/name, and print the object's details.
        S3Object object = new S3Object("object");
        System.out.println("S3Object before upload: " + object);

        // Upload the object to our test bucket in S3.
        object = s3Service.putObject(testBucket, object);

        // Print the details about the uploaded object, which contains more information.
        System.out.println("S3Object after upload: " + object);

        // The example above will create an empty object in S3, which isn't very useful.
        // To make the object useful you must give it some data content.
        // If you know the content (MIME) type of the data (e.g. text/plain) you should set this too.
       
        // S3Objects can contain any data available from an input stream, but JetS3t also
        // provides convenience constructors for File and String data. These constructors
        // automatically set the Content-Type and Content-Length of the object.
       
        // Create an S3Object based on a string, with Content-Length set automatically and
        // Content-Type set to "text/plain" 
        String stringData = "Hello World!";
        S3Object stringObject = new S3Object(testBucket, "HelloWorld.txt", stringData);
       
        // Create an S3Object based on a file, with Content-Length set automatically and
        // Content-Type set based on the file's extension (using the Mimetypes utility class)
        File fileData = new File("src/org/jets3t/samples/CodeSamples.java");
        S3Object fileObject = new S3Object(testBucket, fileData);

        // If your data isn't a File or String you can use any input stream as a data source,
        // but you should also set the Content-Length.
        // Note: it isn't strictly necessary to set the Content-Length, as JetS3t can work
        // out the value itself, however doing so is a good habit that can prevent problems
        // when uploading large objects.

        // Create an object containing a greeting string as input stream data.
        String greeting = "Hello World!";
        S3Object helloWorldObject = new S3Object("HelloWorld2.txt");
        ByteArrayInputStream greetingIS = new ByteArrayInputStream(
            greeting.getBytes(Constants.DEFAULT_ENCODING));
        helloWorldObject.setDataInputStream(greetingIS);
        helloWorldObject.setContentLength(greetingIS.available());
        helloWorldObject.setContentType("text/plain");

        // Upload the data objects.
        s3Service.putObject(testBucket, stringObject);
        s3Service.putObject(testBucket, fileObject);
        s3Service.putObject(testBucket, helloWorldObject);

        // Print details about the uploaded object.
        System.out.println("S3Object with data: " + helloWorldObject);


        /*
         * Downloading data objects
         */
       
        // To download data from S3 you retrieve an S3Object through the S3Service.
        // You may retrieve an object in one of two ways, with the data contents or without.

        // If you just want to know some details about an object and you don't need its contents,
        // it's faster to use the getObjectDetails method. This returns only the object's details,
        // also known as its 'HEAD'. Head information includes the object's size, date, and other
        // metadata associated with it such as the Content Type.

        // Retrieve the HEAD of the data object we created previously.
        S3Object objectDetailsOnly = s3Service.getObjectDetails(testBucket, "HelloWorld.txt");
        System.out.println("S3Object, details only: " + objectDetailsOnly);

        // If you need the data contents of the object, the getObject method will return all the
        // object's details and will also set the object's DataInputStream variable from which
        // the object's data can be read.

        // Retrieve the whole data object we created previously
        S3Object objectComplete = s3Service.getObject(testBucket, "HelloWorld.txt");
        System.out.println("S3Object, complete: " + objectComplete);

        // Read the data from the object's DataInputStream using a loop, and print it out.
        System.out.println("Greeting:");
        BufferedReader reader = new BufferedReader(
            new InputStreamReader(objectComplete.getDataInputStream()));
        String data = null;
        while ((data = reader.readLine()) != null) {
            System.out.println(data);
        }

        /*
         * List your buckets and objects
         */
       
        // Now that you have a bucket and some objects, it's worth listing them. Note that when
        // you list objects, the objects returned will not include much information compared to
        // what you get from the getObject and getObjectDetails methods. However, they will
        // include the size of each object.

        // List all your buckets.
        S3Bucket[] buckets = s3Service.listAllBuckets();

        // List the object contents of each bucket.
        for (int b = 0; b < buckets.length; b++) {
            System.out.println("Bucket '" + buckets[b].getName() + "' contains:");
           
            // List the objects in this bucket.
            S3Object[] objects = s3Service.listObjects(buckets[b]);

            // Print out each object's key and size.
            for (int o = 0; o < objects.length; o++) {
                System.out.println(" " + objects[o].getKey() + " (" + objects[o].getContentLength() + " bytes)");
            }
        }
       
        // When listing the objects in a bucket you can filter which objects to return based on
        // the names of those objects. This is useful when you are only interested in some
        // specific objects in a bucket and you don't need to list all the bucket's contents.
       
        // List only objects whose keys match a prefix.
        String prefix = "Reports";
        String delimiter = null; // Refer to the S3 guide for more information on delimiters
        S3Object[] filteredObjects = s3Service.listObjects(testBucket, prefix, delimiter);       
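
        // A hedged illustration (assumes your JetS3t version provides the
        // listObjectsChunked method): with "/" as the delimiter, objects that
        // share a common prefix are reported as prefixes rather than listed
        // individually, which lets you emulate a directory-style listing.
        S3ObjectsChunk chunk = s3Service.listObjectsChunked(
            testBucket.getName(), null, "/", 1000, null);
        String[] commonPrefixes = chunk.getCommonPrefixes();
        for (int p = 0; p < commonPrefixes.length; p++) {
            System.out.println("Common prefix: " + commonPrefixes[p]);
        }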
       
        /*
         * Deleting objects and buckets
         */
       
        // Objects can be easily deleted. When they are gone they are gone for good so be careful.

        // Buckets may only be deleted when they are empty.

        // If you try to delete your bucket before it is empty, it will fail.
        try {
            // This will fail if the bucket isn't empty.
            s3Service.deleteBucket(testBucket.getName());
        } catch (S3ServiceException e) {
            e.printStackTrace();
        }

        // Delete all the objects in the bucket.
        s3Service.deleteObject(testBucket, object.getKey());
        s3Service.deleteObject(testBucket, stringObject.getKey());
        s3Service.deleteObject(testBucket, fileObject.getKey());
        s3Service.deleteObject(testBucket, helloWorldObject.getKey());

        // Now that the bucket is empty, you can delete it.
        s3Service.deleteBucket(testBucket.getName());
        System.out.println("Deleted bucket " + testBucket.getName());
       
        /* ***********************
         * Multi-threaded Examples
         * ***********************
         */
       
        // The jets3t Toolkit includes utility services, S3ServiceMulti and S3ServiceSimpleMulti, that
        // can perform an S3 operation on many objects at a time. These services allow you to use more
        // of your available bandwidth and perform S3 operations much faster. They work with any
        // thread-safe S3Service implementation, such as the HTTP/REST and SOAP implementations
        // provided with jets3t.

        // The S3ServiceMulti service is intended for advanced developers. It is designed for use in 
        // graphical applications and uses an event-notification approach to communicate its results 
        // rather than standard method calls. This means the service can provide progress reports to 
        // an application during long-running operations. However, this approach makes the service
        // complicated to use. See the code for the Cockpit application to see how this service is used
        // to display progress updates.

        // The S3ServiceSimpleMulti is a service that wraps around S3ServiceMulti and provides a
        // simplified interface, so developers can take advantage of multi-threading without any extra work.

        // The examples below demonstrate how to use some of the multi-threaded operations provided by
        // S3ServiceSimpleMulti.
       
        /*
         * Construct an S3ServiceSimpleMulti service
         */      

        // To use the S3ServiceSimpleMulti service you construct it by providing an existing
        // S3Service object.

        // Create a simple multi-threading service based on our existing S3Service
        S3ServiceSimpleMulti simpleMulti = new S3ServiceSimpleMulti(s3Service);
       
        /*
         * Upload multiple objects at once
         */
       
        // To demonstrate multiple uploads, let's create some small text-data objects and a bucket to put them in.

        // First, create a bucket.
        S3Bucket bucket = new S3Bucket(awsCredentials.getAccessKey() + ".TestMulti");
        bucket = s3Service.createBucket(bucket);

        // Create an array of data objects to upload.
        S3Object[] objects = new S3Object[5];
        objects[0] = new S3Object(bucket, "object1.txt", "Hello from object 1");
        objects[1] = new S3Object(bucket, "object2.txt", "Hello from object 2");
        objects[2] = new S3Object(bucket, "object3.txt", "Hello from object 3");
        objects[3] = new S3Object(bucket, "object4.txt", "Hello from object 4");
        objects[4] = new S3Object(bucket, "object5.txt", "Hello from object 5");

        // Now we have some sample objects, we can upload them.

        // Upload multiple objects.
        S3Object[] createdObjects = simpleMulti.putObjects(bucket, objects);       
        System.out.println("Uploaded " + createdObjects.length + " objects");

        /*
         * Retrieve the HEAD information of multiple objects
         */

        // Perform a Details/HEAD query for multiple objects.
        S3Object[] objectsWithHeadDetails = simpleMulti.getObjectsHeads(bucket, objects);

        // Print out details about all the objects.
        System.out.println("Objects with HEAD Details...");
        for (int i = 0; i < objectsWithHeadDetails.length; i++) {
            System.out.println(objectsWithHeadDetails[i]);
        }

        /*
         * Download objects to local files
         */

        // The multi-threading services provide a method to download multiple objects at a time, but
        // to use this you must first prepare somewhere to put the data associated with each object.
        // The most obvious place to put this data is into a file, so let's go through an example of
        // downloading object data into files.

        // To download our objects into files we first must create a DownloadPackage for
        // each object. This class is a simple container which merely associates an object
        // with an output file (or output stream), to which the object's data will be written.
       
        // Create a DownloadPackage for each object, to associate the object with an output file.
        DownloadPackage[] downloadPackages = new DownloadPackage[5];
        downloadPackages[0] = new DownloadPackage(objects[0],
            new File(objects[0].getKey()));
        downloadPackages[1] = new DownloadPackage(objects[1],
            new File(objects[1].getKey()));
        downloadPackages[2] = new DownloadPackage(objects[2],
            new File(objects[2].getKey()));
        downloadPackages[3] = new DownloadPackage(objects[3],
            new File(objects[3].getKey()));
        downloadPackages[4] = new DownloadPackage(objects[4],
            new File(objects[4].getKey()));
       
        // Download the objects.
        simpleMulti.downloadObjects(bucket, downloadPackages);
        System.out.println("Downloaded objects to current working directory");

        /*
         * Delete multiple objects
         */
       
        // It's time to clean up, so let's get rid of our multiple objects and test bucket.

        // Delete multiple objects, then the bucket too.
        simpleMulti.deleteObjects(bucket, objects);
        s3Service.deleteBucket(bucket);
        System.out.println("Deleted bucket: " + bucket);

        /* *****************
         * Advanced Examples
         * *****************
         */
       
        /*
         * Managing Metadata
         */
       
        // S3Objects can contain metadata stored as name/value pairs. This metadata is stored in
        // S3 and can be accessed when an object is retrieved from S3 using getObject
        // or getObjectDetails methods. To store metadata with an object, add your metadata to
        // the object prior to uploading it to S3.
       
        // Note that metadata cannot be updated in S3 without replacing the existing object,
        // and that metadata names must be strings without spaces.
       
        S3Object objectWithMetadata = new S3Object("metadataObject");
        objectWithMetadata.addMetadata("favourite-colour", "blue");
        objectWithMetadata.addMetadata("document-version", "0.3");
       
       
        /*
         * Save and load encrypted AWS Credentials
         */
       
        // AWS credentials are your means to log in to and manage your S3 account, and should be
        // kept secure. The jets3t toolkit stores these credentials in AWSCredentials objects.
        // The AWSCredentials class provides utility methods to allow credentials to be saved to
        // an encrypted file and loaded from a previously saved file with the right password.
       
        // Save credentials to an encrypted file protected with a password.
        File credFile = new File("awscredentials.enc");
        awsCredentials.save("password", credFile);
       
        // Load encrypted credentials from a file.
        AWSCredentials loadedCredentials = AWSCredentials.load("password", credFile);
        System.out.println("AWS Key loaded from file: " + loadedCredentials.getAccessKey());
       
        // You won't get far if you use the wrong password...
        try {
            loadedCredentials = AWSCredentials.load("wrongPassword", credFile);
        } catch (S3ServiceException e) {
            System.err.println("Cannot load credentials from file with the wrong password!");
        }

        /*
         * Manage Access Control Lists
         */
       
        // S3 uses Access Control Lists to control who has access to buckets and objects in S3.
        // By default, any bucket or object you create will belong to you and will not be accessible
        // to anyone else. You can use jets3t's support for access control lists to make buckets or
        // objects publicly accessible, or to allow other S3 members to access or manage your objects.

        // The ACL capabilities of S3 are quite involved, so to understand this subject fully please
        // consult Amazon's documentation. The code examples below show how to put your understanding
        // of the S3 ACL mechanism into practice.
       
        // ACL settings may be provided with a bucket or object when it is created, or the ACL of
        // existing items may be updated. Let's start by creating a bucket with default (i.e. private)
        // access settings, then making it public.
       
        // Create a bucket in S3.
        S3Bucket publicBucket = new S3Bucket(awsCredentials.getAccessKey() + ".publicBucket");
        s3Service.createBucket(publicBucket);
       
        // Retrieve the bucket's ACL and modify it to grant public access,
        // i.e. READ access to the ALL_USERS group.
        AccessControlList bucketAcl = s3Service.getBucketAcl(publicBucket);
        bucketAcl.grantPermission(GroupGrantee.ALL_USERS, Permission.PERMISSION_READ);
       
        // Update the bucket's ACL. Now anyone can view the list of objects in this bucket.
        publicBucket.setAcl(bucketAcl);
        s3Service.putBucketAcl(publicBucket);
        System.out.println("View bucket's object listing here: http://s3.amazonaws.com/"
            + publicBucket.getName());
       
        // Now let's create an object that is public from scratch. Note that we will re-use the
        // bucket's public ACL object created above; this works fine. Although it is possible to
        // create an AccessControlList object from scratch, this is more involved as you need to
        // set the ACL's Owner information, which is only readily available from an existing ACL.
       
        // Create a public object in S3. Anyone can download this object.
        S3Object publicObject = new S3Object(
            publicBucket, "publicObject.txt", "This object is public");
        publicObject.setAcl(bucketAcl);
        s3Service.putObject(publicBucket, publicObject);       
        System.out.println("View public object contents here: http://s3.amazonaws.com/"
            + publicBucket.getName() + "/" + publicObject.getKey());

        // The ALL_USERS Group is particularly useful, but there are also other grantee types
        // that can be used with AccessControlList. Please see Amazon's S3 technical documentation
        // for a fuller discussion of these settings.
       
        AccessControlList acl = new AccessControlList();
       
        // Grant access by email address. Note that this only works for the email addresses of AWS S3 members.
        acl.grantPermission(new EmailAddressGrantee("someone@somewhere.com"),
            Permission.PERMISSION_FULL_CONTROL);
       
        // Grant control of ACL settings to a known AWS S3 member.
        acl.grantPermission(new CanonicalGrantee("AWS member's ID"),
            Permission.PERMISSION_READ_ACP);
        acl.grantPermission(new CanonicalGrantee("AWS member's ID"),
            Permission.PERMISSION_WRITE_ACP);
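
        // A sketch of the approach described above (assumption: borrow the Owner
        // from an existing ACL rather than building one from scratch). Note that the
        // placeholder grantee IDs above must be replaced with real values before
        // this ACL would be accepted by S3.
        acl.setOwner(bucketAcl.getOwner());
        S3Object restrictedObject = new S3Object(
            publicBucket, "restrictedObject.txt", "Shared with specific grantees");
        restrictedObject.setAcl(acl);
        s3Service.putObject(publicBucket, restrictedObject);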
       
    
        /*
         * Temporarily make an Object available to anyone
         */
       
        // A private object stored in S3 can be made publicly available for a limited time using a
        // signed URL. The signed URL can be used by anyone to download the object, yet it includes
        // a date and time after which the URL will no longer work.
       
        // Create a private object in S3.
        S3Bucket privateBucket = new S3Bucket(awsCredentials.getAccessKey() + ".privateBucket");
        S3Object privateObject = new S3Object(
            privateBucket, "privateObject.txt", "This object is private");
        s3Service.createBucket(privateBucket);
        s3Service.putObject(privateBucket, privateObject);       
       
        // Determine what the time will be in 5 minutes.
        Calendar cal = Calendar.getInstance();
        cal.add(Calendar.MINUTE, 5);
        Date expiryDate = cal.getTime();
       
        // Create a signed HTTP GET URL valid for 5 minutes.
        // If you use the generated URL in a web browser within 5 minutes, you will be able to view
        // the object's contents. After 5 minutes, the URL will no longer work and you will only
        // see an Access Denied message.
        String url = S3Service.createSignedGetUrl(privateBucket.getName(), privateObject.getKey(),
            awsCredentials, expiryDate, false);
        System.out.println("Signed URL: " + url);
       
       
        /*
 
View Full Code Here

    public boolean isSigned() {
        return getSignedUrl() != null;
    }   
   
    public S3Object buildObject() {
      S3Object object = new S3Object(getObjectKey());
      object.addAllMetadata(getObjectMetadata());
      return object;
    }
View Full Code Here

        }

        objectKey = URLDecoder.decode(
            urlPath, Constants.DEFAULT_ENCODING);           

        S3Object object = new S3Object(objectKey);
        object.setBucketName(bucketName);
        return object;
    }
View Full Code Here

        public void endDocument() {
        }

        public void startElement(String uri, String name, String qName, Attributes attrs) {
            if (name.equals("Contents")) {
                currentObject = new S3Object(null);
            } else if (name.equals("Owner")) {
                currentOwner = new S3Owner();
                currentObject.setOwner(currentOwner);
            } else if (name.equals("CommonPrefixes")) {
                insideCommonPrefixes = true;
View Full Code Here
