Package com.amazonaws.services.s3

Examples of com.amazonaws.services.s3.AmazonS3


    /**
     * Deletes every object in the given bucket and then deletes the bucket itself.
     *
     * @param bucketName the bucket name.
     */
    public static void deleteBucket(final String bucketName) throws IOException {
        Properties prop = readConfig(DEFAULT_CONFIG_FILE);
        AmazonS3 s3service = openService(prop);
        ObjectListing prevObjectListing = s3service.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                s3service.deleteObject(bucketName, s3ObjSumm.getKey());
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        s3service.deleteBucket(bucketName);
    }
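The loop above issues one DELETE request per key. For buckets with many objects, the SDK's batch delete call removes up to 1000 keys per request; below is a minimal sketch of the same sweep using DeleteObjectsRequest, assuming the AWS SDK for Java v1 model classes (com.amazonaws.services.s3.model) and java.util are imported, and that the method name is illustrative.

    public static void deleteAllObjectsInBatches(AmazonS3 s3, String bucketName) {
        ObjectListing listing = s3.listObjects(bucketName);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
            }
            if (!keys.isEmpty()) {
                // One listing page holds at most 1000 keys, which matches the batch-delete limit.
                s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keys));
            }
            if (!listing.isTruncated()) {
                break;
            }
            listing = s3.listNextBatchOfObjects(listing);
        }
    }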


    // Excerpt from a download method; "path" and the destination stream "os" are parameters of the enclosing method (not shown).
    {
        try
        {
            logger.info("Downloading " + path.getRemotePath());
            downloadCount.incrementAndGet();
            final AmazonS3 client = getS3Client();
            long contentLen = client.getObjectMetadata(getPrefix(), path.getRemotePath()).getContentLength();
            path.setSize(contentLen);
            RangeReadInputStream rris = new RangeReadInputStream(client, getPrefix(), path);
            final long bufSize = MAX_BUFFERED_IN_STREAM_SIZE > contentLen ? contentLen : MAX_BUFFERED_IN_STREAM_SIZE;
            compress.decompressAndClose(new BufferedInputStream(rris, (int)bufSize), os);
            bytesDownloaded.addAndGet(contentLen);
            // ... (exception handling and method close omitted in this excerpt)
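RangeReadInputStream above is a project-specific wrapper, not an SDK class. With the plain SDK v1 API, a byte range of an object can be fetched with GetObjectRequest.withRange; a minimal sketch follows (the method name and parameters are illustrative):

    // Opens a ranged GET over bytes [start, end] (both inclusive) of the given object.
    public static InputStream openRange(AmazonS3 s3, String bucket, String key, long start, long end) {
        GetObjectRequest request = new GetObjectRequest(bucket, key).withRange(start, end);
        return s3.getObject(request).getObjectContent();
    }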

    @Override
    public void upload(AbstractBackupPath path, InputStream in) throws BackupRestoreException
    {
        uploadCount.incrementAndGet();
        AmazonS3 s3Client = getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(config.getBackupPrefix(), path.getRemotePath());
        InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
        DataPart part = new DataPart(config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
        List<PartETag> partETags = Lists.newArrayList();
        long chunkSize = config.getBackupChunkSize();
        if (path.getSize() > 0)
            chunkSize = (path.getSize() / chunkSize >= MAX_CHUNKS) ? (path.getSize() / (MAX_CHUNKS - 1)) : chunkSize;
        logger.info(String.format("Uploading to %s/%s with chunk size %d", config.getBackupPrefix(), path.getRemotePath(), chunkSize));
        try
        {
            Iterator<byte[]> chunks = compress.compress(in, chunkSize);
            // Upload parts.
            int partNum = 0;
            while (chunks.hasNext())
            {
                byte[] chunk = chunks.next();
                rateLimiter.acquire(chunk.length);
                DataPart dp = new DataPart(++partNum, chunk, config.getBackupPrefix(), path.getRemotePath(), initResponse.getUploadId());
                S3PartUploader partUploader = new S3PartUploader(s3Client, dp, partETags);
                executor.submit(partUploader);
                bytesUploaded.addAndGet(chunk.length);
            }
            executor.sleepTillEmpty();
            if (partNum != partETags.size())
                throw new BackupRestoreException("Number of parts(" + partNum + ")  does not match the uploaded parts(" + partETags.size() + ")");
            new S3PartUploader(s3Client, part, partETags).completeUpload();
           
            if (logger.isDebugEnabled())
            { 
               final S3ResponseMetadata responseMetadata = s3Client.getCachedResponseMetadata(initRequest);
               final String requestId = responseMetadata.getRequestId(); // "x-amz-request-id" header
               final String hostId = responseMetadata.getHostId(); // "x-amz-id-2" header
               logger.debug("S3 AWS x-amz-request-id[" + requestId + "], and x-amz-id-2[" + hostId + "]");
            }
        }
        catch (Exception e)
        {
            // A production implementation should also abort the multipart upload here so orphaned parts are cleaned up.
            throw new BackupRestoreException("Error uploading " + path.getRemotePath() + ": " + e.getMessage());
        }
    }
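If the upload fails part-way, the parts already uploaded keep consuming storage until the multipart upload is aborted. A minimal sketch of the abort call with the same SDK v1 client; in this example the bucket would be config.getBackupPrefix(), the key path.getRemotePath(), and the upload id initResponse.getUploadId().

    public static void abortMultipartUpload(AmazonS3 s3Client, String bucket, String key, String uploadId) {
        // Discards every part uploaded so far under this upload id.
        s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
    }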

    /**
     * Updates the bucket lifecycle rule for this cluster's prefix when a retention period is
     * set. Removes the rule if the retention is set to 0.
     */
    @Override
    public void cleanup()
    {
        AmazonS3 s3Client = getS3Client();
        String clusterPath = pathProvider.get().clusterPrefix("");
        BucketLifecycleConfiguration lifeConfig = s3Client.getBucketLifecycleConfiguration(config.getBackupPrefix());
        if (lifeConfig == null)
        {
            lifeConfig = new BucketLifecycleConfiguration();
            List<Rule> rules = Lists.newArrayList();
            lifeConfig.setRules(rules);
        }
        List<Rule> rules = lifeConfig.getRules();
        if (updateLifecycleRule(rules, clusterPath))
        {
            if( rules.size() > 0 ){
                lifeConfig.setRules(rules);
                s3Client.setBucketLifecycleConfiguration(config.getBackupPrefix(), lifeConfig);
            }
            else
                s3Client.deleteBucketLifecycleConfiguration(config.getBackupPrefix());
        }
    }
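updateLifecycleRule is project-specific and not shown in this excerpt. For reference, an expiration rule scoped to a prefix is built from the SDK v1 model classes roughly as below; the rule id and retention period are illustrative.

    public static BucketLifecycleConfiguration.Rule buildExpirationRule(String clusterPath, int retentionDays) {
        return new BucketLifecycleConfiguration.Rule()
                .withId("expire-" + clusterPath)
                .withPrefix(clusterPath)
                .withExpirationInDays(retentionDays)
                .withStatus(BucketLifecycleConfiguration.ENABLED);
    }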

        // Excerpt from a servlet delete handler; "key" is defined earlier in the method (not shown).
        String bucket = req.getParameter("bucket");

        resp.setStatus(200);

        AWSCredentials myCredentials = new BasicAWSCredentials(AWS_PUBLIC_KEY_SERVER, AWS_SECRET_KEY_SERVER);
        AmazonS3 s3Client = new AmazonS3Client(myCredentials);
        s3Client.deleteObject(bucket, key);
    }
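Hard-coding access keys, as above, works for a demo but is brittle and easy to leak. The SDK's default provider chain resolves credentials from environment variables, JVM system properties, shared credential files, or an instance/container role; a minimal sketch, assuming SDK v1:

        // Picks up credentials from the environment, system properties, profile files, or an EC2/ECS role.
        AmazonS3 s3Client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());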

        // Excerpt from an upload-notification handler; bucket, key, uuid and name are defined earlier in the method (not shown).
        System.out.println(String.format("Upload successfully sent to S3!  Bucket: %s, Key: %s, UUID: %s, Filename: %s",
                bucket, key, uuid, name));

        AWSCredentials myCredentials = new BasicAWSCredentials(
                                        AWS_PUBLIC_KEY_SERVER, AWS_SECRET_KEY_SERVER);
        AmazonS3 s3Client = new AmazonS3Client(myCredentials);

        java.util.Date expiration = new java.util.Date();
        long msec = expiration.getTime();
        msec += 1000 * 60 * 60; // 1 hour.
        expiration.setTime(msec);

        GeneratePresignedUrlRequest generatePresignedUrlRequest =
                      new GeneratePresignedUrlRequest(bucket, key);
        generatePresignedUrlRequest.setMethod(HttpMethod.GET); // Default.
        generatePresignedUrlRequest.setExpiration(expiration);

        URL s = s3Client.generatePresignedUrl(generatePresignedUrlRequest);

        JsonObject response = new JsonObject();
        response.addProperty("thumbnailUrl", s.toString());

        resp.getWriter().write(response.toString());
        // ... (remainder of the handler omitted in this excerpt)
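The presigned URL sent back to the browser can be fetched by any HTTP client without AWS credentials, as long as the request uses the method the URL was signed for and arrives before the expiration time. A minimal sketch using java.net.HttpURLConnection (names are illustrative):

    public static InputStream openPresignedUrl(URL presignedUrl) throws IOException {
        HttpURLConnection connection = (HttpURLConnection) presignedUrl.openConnection();
        connection.setRequestMethod("GET"); // must match the HTTP method used when signing
        if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
            throw new IOException("Unexpected HTTP status: " + connection.getResponseCode());
        }
        return connection.getInputStream();
    }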

                            em.persist(_item);
                        }
                    }
                }
            } else if (field.isJsonLob()) {
                AmazonS3 s3 = em.getS3Service();

                long start3 = System.currentTimeMillis();
                String bucketName = em.getS3BucketName();
                String classRef = ai.getRootClass().getName().replace('.', '/');
                long lobTimestamp = System.currentTimeMillis() / 1000;
                String s3ObjectId = String.format("%s/%s/%08X.json", classRef, id + "-" + field.getFieldName(),
                        lobTimestamp);

                byte[] contentBytes = getJsonPayload(ob);

                InputStream input = new ByteArrayInputStream(contentBytes);

                s3.putObject(bucketName, s3ObjectId, input, null);

                em.statsS3Put(System.currentTimeMillis() - start3);
                logger.finer("setting lobkeyattribute=" + columnName + " - " + s3ObjectId);
                attsToPut.add(new ReplaceableAttribute(columnName, s3ObjectId, true));
            } else if (field.isLob()) {
                // store in s3
                AmazonS3 s3 = null;
                // todo: need to make sure we only store to S3 if it's changed,
                // too slow.
                logger.fine("putting lob to s3");
                long start3 = System.currentTimeMillis();
                s3 = em.getS3Service();
                String bucketName = em.getS3BucketName();
                String s3ObjectId = id + "-" + field.getFieldName();

                ByteArrayOutputStream bos = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bos);
                out.writeObject(ob);
                out.close(); // close (and flush) before reading the buffer so no serialized bytes are left behind
                byte[] contentBytes = bos.toByteArray();
                InputStream input = new ByteArrayInputStream(contentBytes);

                // Passing null metadata means no content length is supplied; see the sketch after this example.
                s3.putObject(bucketName, s3ObjectId, input, null);

                em.statsS3Put(System.currentTimeMillis() - start3);
                logger.finer("setting lobkeyattribute=" + columnName + " - " + s3ObjectId);
                attsToPut.add(new ReplaceableAttribute(columnName, s3ObjectId, true));
            } else if (field.getEnumType() != null) {
                // ... (remaining field-type branches omitted in this excerpt)
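Both putObject calls above pass null metadata, so the SDK does not know the content length and will buffer the whole stream in memory before sending it. Since the byte array length is already known, it can be supplied explicitly; a minimal sketch with the same SDK v1 client (names are illustrative):

    public static void putBytes(AmazonS3 s3, String bucketName, String key, byte[] contentBytes) {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(contentBytes.length); // lets the SDK stream the payload without buffering it
        s3.putObject(bucketName, key, new ByteArrayInputStream(contentBytes), metadata);
    }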

        return factory.getExecutor();
    }

    public Object getObjectFromS3(String idOnS3) throws AmazonClientException, IOException, ClassNotFoundException {
        long start = System.currentTimeMillis();
        AmazonS3 s3 = factory.getS3Service();
        S3Object s3o = s3.getObject(factory.getS3BucketName(), idOnS3);
        logger.fine("got s3object=" + s3o);
        Object ret = null;
        try {
            ObjectInputStream reader = new ObjectInputStream(new BufferedInputStream((s3o.getObjectContent())));
            ret = reader.readObject();
        } finally {
            // Always close the object content so the underlying HTTP connection is released.
            s3o.getObjectContent().close();
        }
        return ret;
    }

    public <T> T getJsonObjectFromS3(String idOnS3, Class<T> retType) throws JsonParseException, JsonMappingException,
            IOException {
        ObjectMapper objectMapper = getObjectMapper();

        long start = System.currentTimeMillis();
        AmazonS3 s3 = factory.getS3Service();
        S3Object s3o = s3.getObject(factory.getS3BucketName(), idOnS3);
        logger.fine("got s3object=" + s3o);
        T ret = null;
        InputStream objectContent = null;
        try {
            objectContent = s3o.getObjectContent();
            ret = objectMapper.readValue(objectContent, retType);
        } finally {
            if (objectContent != null) {
                objectContent.close();
            }
        }
        return ret;
    }

        /*
         * Important: Be sure to fill in your AWS access credentials in the
         *            AwsCredentials.properties file before you try to run this
         *            sample.
         * http://aws.amazon.com/security-credentials
         */
        AmazonS3 s3 = new AmazonS3Client(new PropertiesCredentials(
                S3Sample.class.getResourceAsStream("AwsCredentials.properties")));

        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
        String key = "MyObjectKey";

        System.out.println("===========================================");
        System.out.println("Getting Started with Amazon S3");
        System.out.println("===========================================\n");

        try {
            /*
             * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
             * so once a bucket name has been taken by any user, you can't create
             * another bucket with that same name.
             *
             * You can optionally specify a location for your bucket if you want to
             * keep your data closer to your applications or users.
             */
            System.out.println("Creating bucket " + bucketName + "\n");
            s3.createBucket(bucketName);

            /*
             * List the buckets in your account
             */
            System.out.println("Listing buckets");
            for (Bucket bucket : s3.listBuckets()) {
                System.out.println(" - " + bucket.getName());
            }
            System.out.println();

            /*
             * Upload an object to your bucket - You can easily upload a file to
             * S3, or upload an InputStream directly if you know the length of
             * the data in the stream. You can also specify your own metadata
             * when uploading to S3, which allows you to set a variety of options
             * like content-type and content-encoding, plus additional metadata
             * specific to your applications.
             */
            System.out.println("Uploading a new object to S3 from a file\n");
            s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

            /*
             * Download an object - When you download an object, you get all of
             * the object's metadata and a stream from which to read the contents.
             * It's important to read the contents of the stream as quickly as
             * possible since the data is streamed directly from Amazon S3 and your
             * network connection will remain open until you read all the data or
             * close the input stream.
             *
             * GetObjectRequest also supports several other options, including
             * conditional downloading of objects based on modification times,
             * ETags, and selectively downloading a range of an object.
             */
            System.out.println("Downloading an object");
            S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
            System.out.println("Content-Type: "  + object.getObjectMetadata().getContentType());
            displayTextInputStream(object.getObjectContent());

            /*
             * List objects in your bucket by prefix - There are many options for
             * listing the objects in your bucket.  Keep in mind that buckets with
             * many objects might truncate their results when listing their objects,
             * so be sure to check if the returned object listing is truncated, and
             * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
             * additional results.
             */
            System.out.println("Listing objects");
            ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
                    .withBucketName(bucketName)
                    .withPrefix("My"));
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                System.out.println(" - " + objectSummary.getKey() + "  " +
                                   "(size = " + objectSummary.getSize() + ")");
            }
            System.out.println();

            /*
             * Delete an object - Unless versioning has been turned on for your bucket,
             * there is no way to undelete an object, so use caution when deleting objects.
             */
            System.out.println("Deleting an object\n");
            s3.deleteObject(bucketName, key);

            /*
             * Delete a bucket - A bucket must be completely empty before it can be
             * deleted, so remember to delete any objects from your buckets before
             * you try to delete them.
             */
            System.out.println("Deleting bucket " + bucketName + "\n");
            s3.deleteBucket(bucketName);
        } catch (AmazonServiceException ase) {
            System.out.println("Caught an AmazonServiceException, which means your request made it "
                    + "to Amazon S3, but was rejected with an error response for some reason.");
            System.out.println("Error Message:    " + ase.getMessage());
            System.out.println("HTTP Status Code: " + ase.getStatusCode());
            System.out.println("AWS Error Code:   " + ase.getErrorCode());
            System.out.println("Error Type:       " + ase.getErrorType());
            System.out.println("Request ID:       " + ase.getRequestId());
        } catch (AmazonClientException ace) {
            System.out.println("Caught an AmazonClientException, which means the client encountered "
                    + "an internal problem while trying to communicate with S3, "
                    + "such as not being able to access the network.");
            System.out.println("Error Message: " + ace.getMessage());
        }
