/** * Downloads an S3Object, as returned from {@link * AmazonS3Client#getObject(com.amazonaws.services.s3.model.GetObjectRequest)}, to the specified * file. * * @param s3Object The S3Object containing a reference to an InputStream containing the object's * data. * @param destinationFile The file to store the object's data in. * @param performIntegrityCheck Boolean valuable to indicate whether do the integrity check or not */ public static void downloadObjectToFile( S3Object s3Object, File destinationFile, boolean performIntegrityCheck) { // attempt to create the parent if it doesn't exist File parentDirectory = destinationFile.getParentFile(); if (parentDirectory != null && !parentDirectory.exists()) { parentDirectory.mkdirs(); } OutputStream outputStream = null; try { outputStream = new BufferedOutputStream(new FileOutputStream(destinationFile)); byte[] buffer = new byte[1024 * 10]; int bytesRead; while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) { outputStream.write(buffer, 0, bytesRead); } } catch (IOException e) { try { s3Object.getObjectContent().abort(); } catch (IOException abortException) { log.warn("Couldn't abort stream", e); } throw new AmazonClientException( "Unable to store object contents to disk: " + e.getMessage(), e); } finally { try { outputStream.close(); } catch (Exception e) { } try { s3Object.getObjectContent().close(); } catch (Exception e) { } } byte[] clientSideHash = null; byte[] serverSideHash = null; try { // Multipart Uploads don't have an MD5 calculated on the service side if (ServiceUtils.isMultipartUploadETag(s3Object.getObjectMetadata().getETag()) == false) { clientSideHash = Md5Utils.computeMD5Hash(new FileInputStream(destinationFile)); serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag()); } } catch (Exception e) { log.warn("Unable to calculate MD5 hash to validate download: " + e.getMessage(), e); } if (performIntegrityCheck && clientSideHash != null && serverSideHash != null && 
!Arrays.equals(clientSideHash, serverSideHash)) { throw new AmazonClientException( "Unable to verify integrity of data download. " + "Client calculated content hash didn't match hash calculated by Amazon S3. " + "The data stored in '" + destinationFile.getAbsolutePath() + "' may be corrupt."); } }
/**
 * Returns true if the specified S3Object contains encryption info in its metadata, false
 * otherwise.
 *
 * @param retrievedObject An S3Object
 * @return True if the specified S3Object contains encryption info in its metadata, false
 *     otherwise.
 * @deprecated no longer used and will be removed in the future
 */
@Deprecated
public static boolean isEncryptionInfoInMetadata(S3Object retrievedObject) {
  Map<String, String> userMetadata = retrievedObject.getObjectMetadata().getUserMetadata();
  if (userMetadata == null) {
    return false;
  }
  // Both the IV and the encrypted key must be present for the object to count as encrypted.
  return userMetadata.containsKey(Headers.CRYPTO_IV)
      && userMetadata.containsKey(Headers.CRYPTO_KEY);
}
@Override public PointSet load(String pointSetId) throws Exception { File cachedFile; if (!workOffline) { // get pointset metadata from S3 cachedFile = new File(POINT_DIR, pointSetId + ".json"); if (!cachedFile.exists()) { POINT_DIR.mkdirs(); S3Object obj = s3.getObject(pointsetBucket, pointSetId + ".json.gz"); ObjectMetadata objMet = obj.getObjectMetadata(); FileOutputStream fos = new FileOutputStream(cachedFile); GZIPInputStream gis = new GZIPInputStream(obj.getObjectContent()); try { ByteStreams.copy(gis, fos); } finally { fos.close(); gis.close(); } } } else cachedFile = new File(POINT_DIR, pointSetId + ".json"); // grab it from the cache return PointSet.fromGeoJson(cachedFile); }
/** @see com.amazonaws.http.HttpResponseHandler#handle(com.amazonaws.http.HttpResponse) */
public AmazonWebServiceResponse<S3Object> handle(HttpResponse response) throws Exception {
  /*
   * TODO: It'd be nice to set the bucket name and key here, but the
   * information isn't easy to pull out of the response/request
   * currently.
   */
  S3Object object = new S3Object();
  AmazonWebServiceResponse<S3Object> awsResponse = parseResponseMetadata(response);

  String redirectLocation = response.getHeaders().get(Headers.REDIRECT_LOCATION);
  if (redirectLocation != null) {
    object.setRedirectLocation(redirectLocation);
  }

  ObjectMetadata metadata = object.getObjectMetadata();
  populateObjectMetadata(response, metadata);

  // A server-side MD5 is only usable when the ETag is not a multipart-upload ETag
  // and the response carries the entire object (no Content-Range header).
  boolean hasServerSideCalculatedChecksum =
      !ServiceUtils.isMultipartUploadETag(metadata.getETag());
  boolean responseContainsEntireObject = response.getHeaders().get("Content-Range") == null;

  S3ObjectInputStream contentStream;
  if (hasServerSideCalculatedChecksum && responseContainsEntireObject) {
    byte[] expectedChecksum = BinaryUtils.fromHex(metadata.getETag());
    contentStream =
        new S3ObjectInputStream(
            new ChecksumValidatingInputStream(
                response.getContent(),
                expectedChecksum,
                object.getBucketName() + "/" + object.getKey()),
            response.getHttpRequest());
  } else {
    contentStream =
        new S3ObjectInputStream(response.getContent(), response.getHttpRequest());
  }
  object.setObjectContent(contentStream);

  awsResponse.setResult(object);
  return awsResponse;
}
/**
 * Returns true if this S3 object has the encryption information stored as user meta data; false
 * otherwise.
 */
final boolean hasEncryptionInfo() {
  Map<String, String> userMeta = s3obj.getObjectMetadata().getUserMetadata();
  if (userMeta == null || !userMeta.containsKey(Headers.CRYPTO_IV)) {
    return false;
  }
  // Either the v2 or the v1 encrypted-key header qualifies.
  return userMeta.containsKey(Headers.CRYPTO_KEY_V2) || userMeta.containsKey(Headers.CRYPTO_KEY);
}
/**
 * Returns the original crypto scheme used for encryption, which may differ from the crypto scheme
 * used for decryption during, for example, a range-get operation.
 *
 * @param instructionFile the instruction file of the s3 object; or null if there is none.
 */
ContentCryptoScheme encryptionSchemeOf(Map<String, String> instructionFile) {
  // Prefer the instruction file's CEK algorithm; fall back to the object's user metadata.
  String cekAlgo =
      (instructionFile != null)
          ? instructionFile.get(Headers.CRYPTO_CEK_ALGORITHM)
          : s3obj.getObjectMetadata().getUserMetadata().get(Headers.CRYPTO_CEK_ALGORITHM);
  return ContentCryptoScheme.fromCEKAlgo(cekAlgo);
}
/**
 * Returns true if the specified S3Object is an instruction file containing encryption info, false
 * otherwise.
 *
 * @param instructionFile An S3Object that may potentially be an instruction file
 * @return True if the specified S3Object is an instruction file containing encryption info, false
 *     otherwise.
 * @deprecated no longer used and will be removed in the future
 */
@Deprecated
public static boolean isEncryptionInfoInInstructionFile(S3Object instructionFile) {
  if (instructionFile == null) {
    return false;
  }
  Map<String, String> userMetadata = instructionFile.getObjectMetadata().getUserMetadata();
  // The instruction-file marker header identifies this object as an instruction file.
  return userMetadata != null && userMetadata.containsKey(Headers.CRYPTO_INSTRUCTION_FILE);
}
/**
 * Fetches the configuration object from S3, returning null when it is absent or empty.
 *
 * @return the non-empty config object, or null if it does not exist or has zero length
 * @throws Exception on any S3 error other than "not found"
 */
private S3Object getConfigObject() throws Exception {
  try {
    S3Object object = s3Client.getObject(arguments.getBucket(), arguments.getKey());
    // A zero-length object is treated the same as a missing one.
    return (object.getObjectMetadata().getContentLength() > 0) ? object : null;
  } catch (AmazonS3Exception e) {
    if (isNotFoundError(e)) {
      return null;
    }
    throw e;
  }
}
/**
 * Opens a buffered character stream over the S3 object stored at {@code key}, transparently
 * decompressing it when either the key's file extension or the object's Content-Encoding
 * metadata indicates gzip, zip, deflate, or bzip2.
 *
 * @param key the S3 object key within the namespace's bucket
 * @return a UTF-8 reader over the (possibly decompressed) object content, ~1MB buffered
 * @throws IOException if the S3 fetch fails or a decompression wrapper cannot be created
 */
@Override
public BufferedReader streamFrom(String key) throws IOException {
  S3Object s3Object;
  try {
    s3Object = s3Client.getObject(Namespaces.get().getBucket(), key);
  } catch (AmazonClientException ace) {
    // Surface S3 client failures as IOException to match this method's contract.
    throw new IOException(ace);
  }
  // First try to guess the encoding from the key's extension.
  String[] contentTypeEncoding = guessContentTypeEncodingFromExtension(key);
  String encoding = contentTypeEncoding[1];
  InputStream in = s3Object.getObjectContent();
  try {
    if ("gzip".equals(encoding)) {
      in = new GZIPInputStream(in);
    } else if ("zip".equals(encoding)) {
      in = new ZipInputStream(in);
    } else if ("deflate".equals(encoding)) {
      in = new InflaterInputStream(in);
    } else if ("bzip2".equals(encoding)) {
      in = new BZip2CompressorInputStream(in);
    } else {
      // Extension was inconclusive; fall back to the object's Content-Encoding metadata.
      ObjectMetadata metadata = s3Object.getObjectMetadata();
      if (metadata != null) {
        String contentEncoding = metadata.getContentEncoding();
        if ("gzip".equals(contentEncoding)) {
          in = new GZIPInputStream(in);
        } else if ("zip".equals(contentEncoding)) {
          in = new ZipInputStream(in);
        } else if ("deflate".equals(contentEncoding)) {
          in = new InflaterInputStream(in);
        } else if ("bzip2".equals(contentEncoding)) {
          in = new BZip2CompressorInputStream(in);
        }
      }
    }
  } catch (IOException ioe) {
    // Be extra sure this doesn't leak the connection in case of an error
    in.close();
    throw ioe;
  }
  return new BufferedReader(new InputStreamReader(in, Charsets.UTF_8), 1 << 20); // ~1MB
}
/**
 * Loads the instance configuration from S3, falling back to defaults-only properties (with an
 * epoch last-modified time) when no config object exists.
 *
 * @return the loaded config together with its last-modified timestamp in millis
 * @throws Exception if fetching or parsing the stored properties fails
 */
@Override
public LoadedInstanceConfig loadConfig() throws Exception {
  Properties properties = new Properties();
  Date lastModified;

  S3Object object = getConfigObject();
  if (object == null) {
    // No stored config: behave as if it was last modified at the epoch.
    lastModified = new Date(0L);
  } else {
    try {
      lastModified = object.getObjectMetadata().getLastModified();
      properties.load(object.getObjectContent());
    } finally {
      Closeables.closeQuietly(object.getObjectContent());
    }
  }

  PropertyBasedInstanceConfig config = new PropertyBasedInstanceConfig(properties, defaults);
  return new LoadedInstanceConfig(config, lastModified.getTime());
}
/**
 * Builds an instruction object from the object metadata.
 *
 * @param object A non-null object that contains encryption information in its headers
 * @param materialsProvider The non-null encryption materials provider to be used to encrypt and
 *     decrypt data.
 * @param cryptoProvider The crypto provider whose encryption implementation will be used to
 *     encrypt and decrypt data. Null is ok and uses the preferred provider from
 *     Security.getProviders().
 * @return A non-null instruction object containing encryption information
 * @throws AmazonClientException if encryption information is missing in the metadata, or the
 *     encryption materials used to encrypt the object are not available via the materials
 *     Accessor
 * @deprecated no longer used and will be removed in the future
 */
@Deprecated
public static EncryptionInstruction buildInstructionFromObjectMetadata(
    S3Object object, EncryptionMaterialsProvider materialsProvider, Provider cryptoProvider) {
  ObjectMetadata metadata = object.getObjectMetadata();

  // Get encryption info from metadata.
  byte[] encryptedSymmetricKeyBytes = getCryptoBytesFromMetadata(Headers.CRYPTO_KEY, metadata);
  byte[] initVectorBytes = getCryptoBytesFromMetadata(Headers.CRYPTO_IV, metadata);
  String materialsDescriptionString =
      getStringFromMetadata(Headers.MATERIALS_DESCRIPTION, metadata);
  Map<String, String> materialsDescription = convertJSONToMap(materialsDescriptionString);

  if (encryptedSymmetricKeyBytes == null || initVectorBytes == null) {
    // If necessary encryption info was not found in the instruction file, throw an exception.
    throw new AmazonClientException(
        String.format(
            "Necessary encryption info not found in the headers of file '%s' in bucket '%s'",
            object.getKey(), object.getBucketName()));
  }

  EncryptionMaterials materials =
      retrieveOriginalMaterials(materialsDescription, materialsProvider);
  // If we're unable to retrieve the original encryption materials, we can't decrypt the
  // object, so throw an exception.
  if (materials == null) {
    throw new AmazonClientException(
        String.format(
            "Unable to retrieve the encryption materials that originally "
                + "encrypted file '%s' in bucket '%s'.",
            object.getKey(), object.getBucketName()));
  }

  // Decrypt the symmetric key and create the symmetric cipher
  SecretKey symmetricKey =
      getDecryptedSymmetricKey(encryptedSymmetricKeyBytes, materials, cryptoProvider);
  CipherFactory cipherFactory =
      new CipherFactory(symmetricKey, Cipher.DECRYPT_MODE, initVectorBytes, cryptoProvider);

  return new EncryptionInstruction(
      materialsDescription, encryptedSymmetricKeyBytes, symmetricKey, cipherFactory);
}
/* (non-Javadoc) * @see com.amazonaws.services.s3.AmazonS3#getObject(com.amazonaws.services.s3.model.GetObjectRequest, java.io.File) */ @Override public ObjectMetadata getObject(GetObjectRequest getObjectRequest, File destinationFile) throws AmazonClientException, AmazonServiceException { assertParameterNotNull( destinationFile, "The destination file parameter must be specified when downloading an object directly to a file"); S3Object s3Object = getObject(getObjectRequest); // getObject can return null if constraints were specified but not met if (s3Object == null) return null; OutputStream outputStream = null; try { outputStream = new BufferedOutputStream(new FileOutputStream(destinationFile)); byte[] buffer = new byte[1024 * 10]; int bytesRead; while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) { outputStream.write(buffer, 0, bytesRead); } } catch (IOException e) { throw new AmazonClientException( "Unable to store object contents to disk: " + e.getMessage(), e); } finally { try { outputStream.close(); } catch (Exception e) { } try { s3Object.getObjectContent().close(); } catch (Exception e) { } } /* * Unlike the standard Amazon S3 Client, the Amazon S3 Encryption Client does not do an MD5 check * here because the contents stored in S3 and the contents we just retrieved are different. In * S3, the stored contents are encrypted, and locally, the retrieved contents are decrypted. */ return s3Object.getObjectMetadata(); }
public ByteBuffer get(String store, String name) throws IBlobStore.Error { try { S3Object result = s3client.getObject(store, name); long length = result.getObjectMetadata().getContentLength(); S3ObjectInputStream is = result.getObjectContent(); Log.v(TAG, " length: " + length); if (length > 1024 * 1024) throw new IBlobStore.IOError("data is too big"); byte[] buf = new byte[(int) length]; is.read(buf); is.close(); return ByteBuffer.wrap(buf); } catch (IOException e) { throw new IBlobStore.IOError("" + e); } catch (AmazonServiceException e) { if (e.getStatusCode() == 403) throw new IBlobStore.AuthError("" + e); if (e.getStatusCode() == 404) throw new IBlobStore.NotFoundError("" + e); throw new IBlobStore.IOError("" + e); } catch (AmazonClientException e) { throw new IBlobStore.IOError("" + e); } }
/** Returns the metadata of the underlying S3 object. */
ObjectMetadata getObjectMetadata() {
  return s3obj.getObjectMetadata();
}
/**
 * Returns the size in bytes of the input file stored in S3.
 *
 * @return the object's Content-Length as reported by S3
 */
public long getInputFileSize() {
  // BUG FIX: previously issued a full GetObject, which starts downloading the object
  // body and leaks the open content stream/connection just to read the length.
  // A metadata-only (HEAD) request is sufficient and leaves nothing to close.
  return s3client.getObjectMetadata(bucketName, inputFileName).getContentLength();
}
/**
 * Getting-started sample: creates a uniquely named bucket, lists buckets, uploads and downloads
 * an object, lists objects by prefix, then deletes the object and the bucket. AWS service and
 * client errors are caught and reported to stdout.
 */
public static void main(String[] args) throws IOException {
  /*
   * This credentials provider implementation loads your AWS credentials
   * from a properties file at the root of your classpath.
   *
   * Important: Be sure to fill in your AWS access credentials in the
   * AwsCredentials.properties file before you try to run this
   * sample.
   * http://aws.amazon.com/security-credentials
   */
  AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());

  // Bucket names are globally unique, so a random UUID suffix avoids collisions.
  String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
  String key = "MyObjectKey";

  System.out.println("===========================================");
  System.out.println("Getting Started with Amazon S3");
  System.out.println("===========================================\n");

  try {
    /*
     * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
     * so once a bucket name has been taken by any user, you can't create
     * another bucket with that same name.
     *
     * You can optionally specify a location for your bucket if you want to
     * keep your data closer to your applications or users.
     */
    System.out.println("Creating bucket " + bucketName + "\n");
    s3.createBucket(bucketName);

    /*
     * List the buckets in your account
     */
    System.out.println("Listing buckets");
    for (Bucket bucket : s3.listBuckets()) {
      System.out.println(" - " + bucket.getName());
    }
    System.out.println();

    /*
     * Upload an object to your bucket - You can easily upload a file to
     * S3, or upload directly an InputStream if you know the length of
     * the data in the stream. You can also specify your own metadata
     * when uploading to S3, which allows you set a variety of options
     * like content-type and content-encoding, plus additional metadata
     * specific to your applications.
     */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possible since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    displayTextInputStream(object.getObjectContent());

    /*
     * List objects in your bucket by prefix - There are many options for
     * listing the objects in your bucket. Keep in mind that buckets with
     * many objects might truncate their results when listing their objects,
     * so be sure to check if the returned object listing is truncated, and
     * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
     * additional results.
     */
    System.out.println("Listing objects");
    ObjectListing objectListing =
        s3.listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      System.out.println(
          " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();

    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);

    /*
     * Delete a bucket - A bucket must be completely empty before it can be
     * deleted, so remember to delete any objects from your buckets before
     * you try to delete them.
     */
    System.out.println("Deleting bucket " + bucketName + "\n");
    s3.deleteBucket(bucketName);
  } catch (AmazonServiceException ase) {
    System.out.println(
        "Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
    System.out.println("Error Message: " + ase.getMessage());
    System.out.println("HTTP Status Code: " + ase.getStatusCode());
    System.out.println("AWS Error Code: " + ase.getErrorCode());
    System.out.println("Error Type: " + ase.getErrorType());
    System.out.println("Request ID: " + ase.getRequestId());
  } catch (AmazonClientException ace) {
    System.out.println(
        "Caught an AmazonClientException, which means the client encountered "
            + "a serious internal problem while trying to communicate with S3, "
            + "such as not being able to access the network.");
    System.out.println("Error Message: " + ace.getMessage());
  }
}
public static void main(String[] args) { if (args.length < 2) { System.out.println("USAGE: localPath bucketname <bucketPrefix>"); System.exit(1); } String localPath = args[0]; String bucketName = args[1]; String bucketPrefix = ""; // add on extra slash if (!localPath.endsWith("/")) localPath += "/"; if (args.length == 3) bucketPrefix = args[2]; // check local dir, make if it does not exist File localDir = new File(localPath); if (!localDir.exists()) localDir.mkdirs(); if (!localDir.isDirectory()) { System.out.println("Local Dir is not a dir: " + localPath); System.exit(1); } long totalBytes = 0; long start = System.currentTimeMillis(); AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider()); ObjectListing listObjects = s3.listObjects(bucketName, bucketPrefix); do { for (S3ObjectSummary objectSummary : listObjects.getObjectSummaries()) { S3Object object = s3.getObject(bucketName, objectSummary.getKey()); S3ObjectInputStream inputStream = object.getObjectContent(); if ("gzip".equals(object.getObjectMetadata().getContentEncoding())) { InputStream in = null; try { totalBytes += object.getObjectMetadata().getContentLength(); in = new GZIPInputStream(inputStream); // write this sucker out String path = localPath + object.getKey(); // have to take the gz off since this is not downloading compressed! 
if (path.endsWith(".gz")) path = path.substring(0, path.length() - 3); System.out.print("Writing file: " + path); File check = new File(path); File parentFile = check.getParentFile(); if (!parentFile.exists()) parentFile.mkdirs(); FileOutputStream out = new FileOutputStream(path); IOUtils.copy(in, out); System.out.println(" written."); } catch (IOException e) { System.out.println("crap"); e.printStackTrace(); throw new IllegalStateException("files are too hard", e); } finally { IOUtils.closeQuietly(in); } } else { System.out.println( "unhandled content encoding: " + object.getObjectMetadata().getContentEncoding()); } } listObjects = s3.listNextBatchOfObjects(listObjects); } while (listObjects.isTruncated()); long now = System.currentTimeMillis(); System.out.println( (totalBytes / 1000.0 / 1000.0) + " mb downloaded in " + ((now - start) / 1000) + " seconds."); }
/** Returns true if this S3 object is an instruction file; false otherwise. */
final boolean isInstructionFile() {
  Map<String, String> userMeta = s3obj.getObjectMetadata().getUserMetadata();
  // The presence of the instruction-file marker header is the sole criterion.
  if (userMeta == null) {
    return false;
  }
  return userMeta.containsKey(Headers.CRYPTO_INSTRUCTION_FILE);
}