@Override
public int read(String table, String key, Set<String> fields, HashMap<String, ByteIterator> result) {
  logger.debug("readkey: " + key + " from table: " + table);
  GetItemRequest req = new GetItemRequest(table, createPrimaryKey(key));
  req.setAttributesToGet(fields);
  req.setConsistentRead(consistentRead);
  GetItemResult res = null;
  try {
    res = dynamoDB.getItem(req);
  } catch (AmazonServiceException ex) {
    logger.error(ex.getMessage());
    return SERVER_ERROR;
  } catch (AmazonClientException ex) {
    logger.error(ex.getMessage());
    return CLIENT_ERROR;
  }
  if (null != res.getItem()) {
    result.putAll(extractResult(res.getItem()));
    logger.debug("Result: " + res.toString());
  }
  return OK;
}
@Override
public void progressChanged(ProgressEvent e) {
  if (null == upload) return;
  TransferProgress xProgress = upload.getProgress();
  System.out.print(
      "\r "
          + String.format("%.2f", xProgress.getPercentTransfered())
          + "% "
          + asNumber(xProgress.getBytesTransfered())
          + "/"
          + asNumber(contentLen)
          + BLANK_LINE);
  switch (e.getEventCode()) {
    case ProgressEvent.COMPLETED_EVENT_CODE: {
      System.out.println("Done");
      break;
    }
    case ProgressEvent.FAILED_EVENT_CODE: {
      try {
        AmazonClientException exc = upload.waitForException();
        System.err.println("Unable to upload file: " + exc.getMessage());
      } catch (InterruptedException ignored) {
      }
      break;
    }
  }
}
protected FormValidation doTestConnection(
    URL ec2endpoint,
    boolean useInstanceProfileForCredentials,
    String accessId,
    String secretKey,
    String privateKey)
    throws IOException, ServletException {
  try {
    AWSCredentialsProvider credentialsProvider =
        createCredentialsProvider(useInstanceProfileForCredentials, accessId, secretKey);
    AmazonEC2 ec2 = connect(credentialsProvider, ec2endpoint);
    ec2.describeInstances();
    if (privateKey == null)
      return FormValidation.error(
          "Private key is not specified. Click 'Generate Key' to generate one.");
    if (privateKey.trim().length() > 0) {
      // check if this key exists
      EC2PrivateKey pk = new EC2PrivateKey(privateKey);
      if (pk.find(ec2) == null)
        return FormValidation.error(
            "The EC2 key pair private key isn't registered to this EC2 region (fingerprint is "
                + pk.getFingerprint()
                + ")");
    }
    return FormValidation.ok(Messages.EC2Cloud_Success());
  } catch (AmazonClientException e) {
    LOGGER.log(Level.WARNING, "Failed to check EC2 credential", e);
    return FormValidation.error(e.getMessage());
  }
}
public LinkedList<String> listNames(String prefix, String bucketName, String[] canonicalIDs)
    throws StorageCloudException {
  LinkedList<String> find = new LinkedList<String>();
  try {
    ObjectListing objectListing = null;
    if (bucketName == null) {
      objectListing =
          conn.listObjects(
              new ListObjectsRequest().withBucketName(defaultBucketName).withPrefix(prefix));
    } else {
      bucketName = bucketName.concat(location);
      objectListing =
          conn.listObjects(
              new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix));
    }
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      find.add(objectSummary.getKey());
    }
  } catch (AmazonServiceException e1) {
    throw new ServiceSiteException("AWSS3Exception::" + e1.getMessage());
  } catch (AmazonClientException e2) {
    throw new ClientServiceException("AWSS3Exception::" + e2.getMessage());
  }
  return find;
}
private List<JsonNode> pullMessages(int interval) {
  List<JsonNode> msgs = new ArrayList<JsonNode>();
  if (!isBlank(QUEUE_URL)) {
    try {
      ReceiveMessageRequest receiveReq = new ReceiveMessageRequest(QUEUE_URL);
      receiveReq.setMaxNumberOfMessages(MAX_MESSAGES);
      receiveReq.setWaitTimeSeconds(interval);
      List<Message> list = sqs.receiveMessage(receiveReq).getMessages();
      if (list != null && !list.isEmpty()) {
        if (DEBUG) logger.info("Received {} messages from queue.", list.size());
        List<DeleteMessageBatchRequestEntry> deletionList =
            new ArrayList<DeleteMessageBatchRequestEntry>();
        for (Message message : list) {
          if (!isBlank(message.getBody())) {
            msgs.add(mapper.readTree(message.getBody()));
          }
          deletionList.add(
              new DeleteMessageBatchRequestEntry(
                  UUID.randomUUID().toString(), message.getReceiptHandle()));
        }
        sqs.deleteMessageBatch(QUEUE_URL, deletionList);
      }
    } catch (IOException ex) {
      logger.error(ex.getMessage());
    } catch (AmazonServiceException ase) {
      logException(ase);
    } catch (AmazonClientException ace) {
      logger.error("Could not reach SQS. {}", ace.getMessage());
    }
  }
  return msgs;
}
@Override
public List<KinesisMessageModel> emit(final UnmodifiableBuffer<KinesisMessageModel> buffer) {
  List<KinesisMessageModel> items = buffer.getRecords();
  // Items that fail to save are not tracked here, so an empty list is always returned.
  List<KinesisMessageModel> failedItems = new ArrayList<KinesisMessageModel>();
  DynamoDBMapper mapper = new DynamoDBMapper(dynamoDBClient);
  try {
    mapper.batchSave(items);
    LOG.info("Successfully emitted: " + items.size() + " items");
  } catch (AmazonClientException e) {
    e.printStackTrace();
  }
  return failedItems;
}
private static void basicAbortMPU() throws IOException {
  System.out.println("basic abort MPU");
  String bucketName = "chttest";
  String fileName = "hello.txt";
  // String uploadID = "XHGTFV4F5XTEAC5O8N3LK12TIY3DSY7OFPXIWTHRMNTE7A3WB5M8N2U5AN"; // hi
  String uploadID = "LE5JS2K6C208JU7ZX1QD2TVRWXOWWF4VNG7LE7TFIX5SYNG4HLOGW9CLAD"; // hello
  AbortMultipartUploadRequest request =
      new AbortMultipartUploadRequest(bucketName, fileName, uploadID);
  AmazonS3 s3 =
      new AmazonS3Client(
          new PropertiesCredentials(
              putBucket.class.getResourceAsStream("AwsCredentials.properties")));
  try {
    s3.abortMultipartUpload(request);
    System.out.println("Multipart upload aborted.");
  } catch (AmazonServiceException ase) {
    System.out.println(
        "Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
    System.out.println("Error Message: " + ase.getMessage());
    System.out.println("HTTP Status Code: " + ase.getStatusCode());
    System.out.println("AWS Error Code: " + ase.getErrorCode());
    System.out.println("Error Type: " + ase.getErrorType());
    System.out.println("Request ID: " + ase.getRequestId());
  } catch (AmazonClientException ace) {
    System.out.println(
        "Caught an AmazonClientException, which means the client encountered "
            + "a serious internal problem while trying to communicate with S3, "
            + "such as not being able to access the network.");
    System.out.println("Error Message: " + ace.getMessage());
  }
}
@Override
public F.Promise<Result> getDownload(String key, String name) {
  GeneratePresignedUrlRequest generatePresignedUrlRequest =
      new GeneratePresignedUrlRequest(bucketName, key);
  ResponseHeaderOverrides responseHeaders = new ResponseHeaderOverrides();
  responseHeaders.setContentDisposition("attachment; filename=" + name);
  generatePresignedUrlRequest.setResponseHeaders(responseHeaders);
  AmazonS3 amazonS3 = new AmazonS3Client(credentials);
  try {
    URL url = amazonS3.generatePresignedUrl(generatePresignedUrlRequest);
    return F.Promise.pure(redirect(url.toString()));
  } catch (AmazonClientException ace) {
    Logger.error(
        "Caught an AmazonClientException, which means the client encountered "
            + "an internal error while trying to communicate with S3, "
            + "such as not being able to access the network."
            + " Error Message: " + ace.getMessage());
    return F.Promise.pure(internalServerError("Download failed"));
  }
}
public String[] setAcl(String bucketNameToShare, String[] canonicalId, String permission)
    throws StorageCloudException {
  boolean withRead = false;
  if (bucketNameToShare != null) {
    bucketNameToShare = bucketNameToShare.concat(location);
    if (!conn.doesBucketExist(bucketNameToShare)) {
      conn.createBucket(bucketNameToShare, region);
    }
  } else {
    return null;
  }
  // grant the requested permissions on the bucket ACL
  AccessControlList acl = conn.getBucketAcl(bucketNameToShare);
  for (int i = 0; i < canonicalId.length; i++) {
    if (permission.equals("rw")) {
      CanonicalGrantee grantee = new CanonicalGrantee(canonicalId[i]);
      acl.grantPermission(grantee, Permission.Read);
      acl.grantPermission(grantee, Permission.Write);
      withRead = true;
    } else if (permission.equals("r")) {
      acl.grantPermission(new CanonicalGrantee(canonicalId[i]), Permission.Read);
      withRead = true;
    } else if (permission.equals("w")) {
      acl.grantPermission(new CanonicalGrantee(canonicalId[i]), Permission.Write);
    }
  }
  try {
    if (withRead) {
      // read access also requires Read permission on every existing object in the bucket
      ObjectListing objectListing = conn.listObjects(bucketNameToShare);
      AccessControlList aclKeys = null;
      for (S3ObjectSummary elem : objectListing.getObjectSummaries()) {
        aclKeys = conn.getObjectAcl(bucketNameToShare, elem.getKey());
        for (int i = 0; i < canonicalId.length; i++) {
          aclKeys.grantPermission(new CanonicalGrantee(canonicalId[i]), Permission.Read);
        }
        conn.setObjectAcl(bucketNameToShare, elem.getKey(), aclKeys);
      }
    }
    // apply the bucket ACL and confirm the grant was actually recorded
    conn.setBucketAcl(bucketNameToShare, acl);
    AccessControlList newAcl = conn.getBucketAcl(bucketNameToShare);
    Set<Grant> grants = newAcl.getGrants();
    boolean flag = false;
    for (Grant grant : grants) {
      if (grant.getGrantee().getIdentifier().equals(canonicalId[0])) {
        flag = true;
      }
    }
    if (!flag) {
      throw new ServiceSiteException("AWSS3Exception:: ACL");
    }
  } catch (AmazonServiceException e1) {
    throw new ServiceSiteException("AWSS3Exception::" + e1.getMessage());
  } catch (AmazonClientException e2) {
    throw new ClientServiceException("AWSS3Exception::" + e2.getMessage());
  }
  return canonicalId;
}
public void storeMessageInQueue(String message) {
  // Store the message either directly in the database or via SQS.
  if (driectDbConnection) {
    DbConnector db = null;
    try {
      db = dbf.createDBConnectorInstance();
    } catch (MailAppDBException e) {
      e.printStackTrace();
    }
    MailAppMessage mailMsg =
        new MailAppMessage(message, "" + System.currentTimeMillis() + "_" + UUID.randomUUID());
    try {
      db.storeMessage(mailMsg);
    } catch (MailAppDBException mae) {
      mae.printStackTrace();
    }
  } else {
    // Write the message to SQS, to be consumed by a storage worker
    // (more scalable and DB-independent than writing directly).
    try {
      sqs.sendMessage(new SendMessageRequest(mailQueueUrl, message));
    } catch (AmazonServiceException ase) {
      System.out.println(
          "Caught an AmazonServiceException, which means your request made it "
              + "to Amazon SQS, but was rejected with an error response for some reason.");
      System.out.println("Error Message: " + ase.getMessage());
      System.out.println("HTTP Status Code: " + ase.getStatusCode());
      System.out.println("AWS Error Code: " + ase.getErrorCode());
      System.out.println("Error Type: " + ase.getErrorType());
      System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println(
          "Caught an AmazonClientException, which means the client encountered "
              + "a serious internal problem while trying to communicate with SQS, such as not "
              + "being able to access the network.");
      System.out.println("Error Message: " + ace.getMessage());
    }
    printDebug("Message stored to " + mailQueueUrl);
  }
}
public static void sendMessage(AmazonSQS sqs, String sqsUrl, String info) {
  try {
    sqs.sendMessage(new SendMessageRequest(sqsUrl, info));
    logger.info("Message sent to queue: " + info);
  } catch (AmazonServiceException e) {
    logger.severe(e.getMessage());
  } catch (AmazonClientException e) {
    logger.severe(e.getMessage());
  }
}
public FormValidation doCheckUseInstanceProfileForCredentials(@QueryParameter boolean value) {
  if (value) {
    try {
      new InstanceProfileCredentialsProvider().getCredentials();
    } catch (AmazonClientException e) {
      return FormValidation.error(
          Messages.EC2Cloud_FailedToObtainCredentailsFromEC2(), e.getMessage());
    }
  }
  return FormValidation.ok();
}
public static void deleteTaskMessage(Message msg, String sqsUrl, AmazonSQS sqs) {
  String handle = msg.getReceiptHandle();
  try {
    sqs.deleteMessage(new DeleteMessageRequest(sqsUrl, handle));
    logger.info("Message deleted: " + msg.getBody());
  } catch (AmazonServiceException e) {
    logger.severe(e.getMessage());
  } catch (AmazonClientException e) {
    logger.severe(e.getMessage());
  }
}
public boolean uploadToS3(String fileName, String text) throws IOException {
  try {
    String folderName = this.p.getProperty("KEY_NAME");
    String bucketName = this.p.getProperty("AWS_BUCKET_NAME");
    logger.info(LogKey.MESSAGE, "Uploading a new object to S3 from a file");
    byte[] contentAsBytes = text.getBytes("UTF-8");
    ByteArrayInputStream contentsAsStream = new ByteArrayInputStream(contentAsBytes);
    ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(contentAsBytes.length);
    if (this.s3client == null) {
      logger.debug(LogKey.MESSAGE, "no S3 client yet, creating one");
      this.s3client = this.getS3Client();
    }
    logger.info(LogKey.BUCKET_NAME, bucketName);
    logger.info(LogKey.KEY_NAME, folderName);
    logger.info(LogKey.FILE_NAME, fileName);
    // Note: File.separator is platform-dependent; S3 keys conventionally use "/".
    this.s3client.putObject(
        new PutObjectRequest(
            bucketName, folderName + File.separator + fileName, contentsAsStream, md));
    return true;
  } catch (AmazonServiceException ase) {
    logger.error(LogKey.EXCEPTION, ase);
    logger.error(
        LogKey.EXCEPTION,
        "Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
    logger.error(LogKey.EXCEPTION, "Error Message: " + ase.getMessage());
    logger.error(LogKey.EXCEPTION, "HTTP Status Code: " + ase.getStatusCode());
    logger.error(LogKey.EXCEPTION, "AWS Error Code: " + ase.getErrorCode());
    logger.error(LogKey.EXCEPTION, "Error Type: " + ase.getErrorType());
    logger.error(LogKey.EXCEPTION, "Request ID: " + ase.getRequestId());
  } catch (AmazonClientException ace) {
    logger.error(
        LogKey.EXCEPTION,
        "Caught an AmazonClientException, which means the client encountered "
            + "an internal error while trying to communicate with S3, "
            + "such as not being able to access the network.");
    logger.error(LogKey.EXCEPTION, "Error Message: " + ace.getMessage());
  }
  return false;
}
private void createAmazonS3Bucket() {
  try {
    if (!tx.getAmazonS3Client().doesBucketExist(bucketName)) {
      tx.getAmazonS3Client().createBucket(bucketName);
    }
  } catch (AmazonClientException ace) {
    JOptionPane.showMessageDialog(
        frame,
        "Unable to create a new Amazon S3 bucket: " + ace.getMessage(),
        "Error Creating Bucket",
        JOptionPane.ERROR_MESSAGE);
  }
}
public void deleteUrlList(UrlList urlList) {
  try {
    // Derive the S3 key by hashing the parent URL, then delete the object.
    String s3Key = Hash.hashKey(urlList.getParentUrl());
    s3client.deleteObject(bucketName, s3Key);
  } catch (AmazonClientException e) {
    System.out.println(
        "S3UrlListDA : Exception while deleting document with id - " + urlList.getParentUrl());
    e.printStackTrace();
  } catch (NoSuchAlgorithmException e) {
    e.printStackTrace();
  }
}
// Builds the https:// address of a file in the bucket, using the bucket's region.
public static String generateS3FileAddress(AmazonS3 s3, String path) {
  String address;
  try {
    address = s3.getBucketLocation(bucket);
  } catch (AmazonServiceException e) {
    logger.severe(e.getMessage());
    return null;
  } catch (AmazonClientException e) {
    logger.severe(e.getMessage());
    return null;
  }
  return "https://s3-" + address + ".amazonaws.com/" + bucket + "/" + path;
}
/**
 * Gets an object stored in S3 and downloads it into the specified file. This method includes a
 * one-time retry after an integrity check failure on the downloaded file. It returns immediately
 * if it receives a null S3Object (i.e. the getObject request did not meet the specified
 * constraints).
 *
 * @param file The file to store the object's data in.
 * @param retryableS3DownloadTask The RetryableS3DownloadTask implementation that gives this
 *     method access to the relevant state at the calling site.
 * @return The downloaded S3Object, or null if the request constraints were not met.
 */
public static S3Object retryableDownloadS3ObjectToFile(
    File file, RetryableS3DownloadTask retryableS3DownloadTask) {
  boolean hasRetried = false;
  boolean needRetry;
  S3Object s3Object;
  do {
    needRetry = false;
    s3Object = retryableS3DownloadTask.getS3ObjectStream();
    if (s3Object == null) return null;
    try {
      ServiceUtils.downloadObjectToFile(
          s3Object, file, retryableS3DownloadTask.needIntegrityCheck());
    } catch (AmazonClientException ace) {
      // Decide whether an immediate retry is needed based on the captured AmazonClientException.
      // downloadObjectToFile() throws AmazonClientException in three cases:
      //   1) SocketException or SSLProtocolException while writing to disk
      //      (e.g. when the user aborts the download)
      //   2) any other IOException while writing to disk
      //   3) the MD5 hashes don't match
      // The download is retried only in cases 2) and 3).
      if (ace.getCause() instanceof SocketException
          || ace.getCause() instanceof SSLProtocolException) {
        throw ace;
      } else {
        needRetry = true;
        if (hasRetried) {
          throw ace;
        } else {
          log.info(
              "Retry the download of object "
                  + s3Object.getKey()
                  + " (bucket "
                  + s3Object.getBucketName()
                  + ")",
              ace);
          hasRetried = true;
        }
      }
    } finally {
      try {
        s3Object.getObjectContent().abort();
      } catch (IOException e) {
        // ignore: the stream is being discarded anyway
      }
    }
  } while (needRetry);
  return s3Object;
}
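The helper above only fixes the retry control flow; the actual GetObject call is supplied by the caller through the RetryableS3DownloadTask seen in the signature. A minimal caller sketch, assuming a plain AmazonS3 client and illustrative bucket/key names (not part of the original code):

// Hypothetical usage sketch; bucket name, key, and client construction are illustrative.
final AmazonS3 s3 = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());
File target = new File("downloaded.dat");
S3Object downloaded =
    retryableDownloadS3ObjectToFile(
        target,
        new RetryableS3DownloadTask() {
          @Override
          public S3Object getS3ObjectStream() {
            // Called again on retry, so the object stream is reopened from the start.
            return s3.getObject(new GetObjectRequest("my-bucket", "my-key"));
          }

          @Override
          public boolean needIntegrityCheck() {
            // Ask downloadObjectToFile() to verify the downloaded content against its MD5.
            return true;
          }
        });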
/**
 * ReRegion can retry some AWS calls.
 *
 * @throws Exception If there is a problem inside
 */
@Test
public void retriesAwsCalls() throws Exception {
  final Table table = Mockito.mock(Table.class);
  final Attributes attrs = new Attributes();
  final String msg = "hey you";
  Mockito.doThrow(new AmazonClientException(msg)).when(table).put(attrs);
  final Region origin = Mockito.mock(Region.class);
  Mockito.doReturn(table).when(origin).table(Mockito.anyString());
  final Region region = new ReRegion(origin);
  try {
    region.table("test").put(attrs);
    Assert.fail("exception expected here");
  } catch (final AmazonClientException ex) {
    assert ex.getMessage().equals(msg);
  }
  Mockito.verify(table, Mockito.times(Tv.THREE)).put(attrs);
}
// Marks the file as publicly readable on the PutObjectRequest, then sends the upload request.
private static boolean putObject(AmazonS3 s3, PutObjectRequest req) {
  // Set file as public.
  req.withCannedAcl(CannedAccessControlList.PublicRead);
  // Send upload request.
  try {
    s3.putObject(req);
  } catch (AmazonServiceException e) {
    logger.severe(e.getMessage());
    return false;
  } catch (AmazonClientException e) {
    logger.severe(e.getMessage());
    return false;
  }
  return true;
}
public static List<Message> getMessages(ReceiveMessageRequest req, AmazonSQS sqs) {
  logger.fine("Getting SQS messages.");
  List<Message> msgs;
  try {
    msgs = sqs.receiveMessage(req).getMessages();
  } catch (AmazonServiceException e) {
    logger.severe(e.getMessage());
    return null;
  } catch (AmazonClientException e) {
    logger.severe(e.getMessage());
    return null;
  }
  return msgs;
}
@Override
public int delete(String table, String key) {
  logger.debug("deletekey: " + key + " from table: " + table);
  DeleteItemRequest req = new DeleteItemRequest(table, createPrimaryKey(key));
  DeleteItemResult res = null;
  try {
    res = dynamoDB.deleteItem(req);
  } catch (AmazonServiceException ex) {
    logger.error(ex.getMessage());
    return SERVER_ERROR;
  } catch (AmazonClientException ex) {
    logger.error(ex.getMessage());
    return CLIENT_ERROR;
  }
  return OK;
}
/** Verifies the RetryCondition has collected the expected context information. */
public static void verifyExpectedContextData(
    ContextDataCollection contextDataCollection,
    AmazonWebServiceRequest failedRequest,
    AmazonClientException expectedException,
    int expectedRetries) {
  Assert.assertEquals(expectedRetries, contextDataCollection.failedRequests.size());
  Assert.assertEquals(expectedRetries, contextDataCollection.exceptions.size());
  Assert.assertEquals(expectedRetries, contextDataCollection.retriesAttemptedValues.size());
  if (expectedRetries > 0) {
    // It should keep getting the same original request instance
    for (AmazonWebServiceRequest seenRequest : contextDataCollection.failedRequests) {
      Assert.assertTrue(seenRequest == failedRequest);
    }
    // Verify the exceptions
    if (expectedException instanceof AmazonServiceException) {
      // It should get service exceptions with the expected error and status code
      AmazonServiceException ase = (AmazonServiceException) expectedException;
      for (AmazonClientException seenException : contextDataCollection.exceptions) {
        Assert.assertTrue(seenException instanceof AmazonServiceException);
        Assert.assertEquals(
            ase.getErrorCode(), ((AmazonServiceException) seenException).getErrorCode());
        Assert.assertEquals(
            ase.getStatusCode(), ((AmazonServiceException) seenException).getStatusCode());
      }
    } else {
      // Client exceptions should have the same expected cause (the same
      // throwable instance from the mock HttpClient).
      Throwable expectedCause = expectedException.getCause();
      for (AmazonClientException seenException : contextDataCollection.exceptions) {
        Assert.assertTrue(expectedCause == seenException.getCause());
      }
    }
    // It should get "retriesAttempted" values starting from 0
    int expectedRetriesAttempted = 0;
    for (int seenRetriesAttempted : contextDataCollection.retriesAttemptedValues) {
      Assert.assertEquals(expectedRetriesAttempted++, seenRetriesAttempted);
    }
  }
}
@Inject
public AmazonS3Storage(Configuration configuration) {
  bucketName = configuration.getString("storage.s3.bucket");
  String accessKey = configuration.getString("storage.s3.accesskey");
  String secretKey = configuration.getString("storage.s3.secretkey");
  credentials = new BasicAWSCredentials(accessKey, secretKey);
  AmazonS3 amazonS3 = new AmazonS3Client(credentials);
  try {
    if (!(amazonS3.doesBucketExist(bucketName))) {
      amazonS3.createBucket(new CreateBucketRequest(bucketName));
    }
    String bucketLocation = amazonS3.getBucketLocation(new GetBucketLocationRequest(bucketName));
    Logger.info("Amazon S3 bucket created at " + bucketLocation);
  } catch (AmazonServiceException ase) {
    Logger.error(
        "Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason."
            + " Error Message: " + ase.getMessage()
            + " HTTP Status Code: " + ase.getStatusCode()
            + " AWS Error Code: " + ase.getErrorCode()
            + " Error Type: " + ase.getErrorType()
            + " Request ID: " + ase.getRequestId());
  } catch (AmazonClientException ace) {
    Logger.error(
        "Caught an AmazonClientException, which means the client encountered "
            + "an internal error while trying to communicate with S3, "
            + "such as not being able to access the network."
            + " Error Message: " + ace.getMessage());
  }
}
public void storeMessageInS3(String message) {
  AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
  String bucketName = "mailAppTestBucket_1";
  String key = "message_" + UUID.randomUUID();
  // Upload the message as a new object to S3.
  try {
    s3.putObject(new PutObjectRequest(bucketName, key, createMessageFile(message)));
  } catch (AmazonServiceException e) {
    e.printStackTrace();
  } catch (AmazonClientException e) {
    e.printStackTrace();
  } catch (IOException e) {
    e.printStackTrace();
  }
}
public static String xs3_generate_url(String xs3_objname, String content_type) {
  AWSCredentials xs3_credentials = new BasicAWSCredentials(xs3_access_key, xs3_secret_key);
  ClientConfiguration xs3_clientconfig = new ClientConfiguration();
  xs3_clientconfig.setProtocol(Protocol.HTTP);
  S3ClientOptions xs3_client_options = new S3ClientOptions();
  xs3_client_options.setPathStyleAccess(true);
  xs3_client = new AmazonS3Client(xs3_credentials, xs3_clientconfig);
  xs3_client.setEndpoint(xs3_endpoint);
  xs3_client.setS3ClientOptions(xs3_client_options);
  try {
    java.util.Date expiration = new java.util.Date();
    long milliSeconds = expiration.getTime();
    milliSeconds += 1000 * 60 * 5;
    expiration.setTime(milliSeconds);
    GeneratePresignedUrlRequest xs3_genurl_req =
        new GeneratePresignedUrlRequest(xs3_bucketname, xs3_objname);
    xs3_genurl_req.setMethod(HttpMethod.PUT);
    xs3_genurl_req.setExpiration(expiration);
    xs3_genurl_req.setContentType(content_type);
    xs3_genurl_req.addRequestParameter("x-amz-acl", "public-read");
    URL url = xs3_client.generatePresignedUrl(xs3_genurl_req);
    System.out.println(url.toString());
    return url.toString();
  } catch (AmazonServiceException ase) {
    System.out.println("xs3_svr_error_message:" + ase.getMessage());
    System.out.println("xs3_svr_status_code: " + ase.getStatusCode());
    System.out.println("xs3_svr_error_code: " + ase.getErrorCode());
    System.out.println("xs3_svr_error_type: " + ase.getErrorType());
    System.out.println("xs3_svr_request_id: " + ase.getRequestId());
  } catch (AmazonClientException ace) {
    System.out.println("xs3_clt_error_message:" + ace.getMessage());
  }
  return null;
}
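A presigned PUT URL like the one returned above is only accepted by S3 if the client replays the signed method and headers. A minimal sketch of such a client (hypothetical helper, plain HttpURLConnection; the content type and x-amz-acl values must match what xs3_generate_url signed):

// Hypothetical client-side upload through the presigned URL; not part of the original code.
public static int xs3_upload_via_presigned_url(String presigned_url, byte[] data, String content_type)
    throws java.io.IOException {
  java.net.HttpURLConnection conn =
      (java.net.HttpURLConnection) new java.net.URL(presigned_url).openConnection();
  conn.setDoOutput(true);
  conn.setRequestMethod("PUT");
  conn.setRequestProperty("Content-Type", content_type); // must match the signed content type
  conn.setRequestProperty("x-amz-acl", "public-read");   // must match the signed request parameter
  try (java.io.OutputStream out = conn.getOutputStream()) {
    out.write(data);
  }
  return conn.getResponseCode(); // 200 on success
}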
@Override
public int insert(String table, String key, HashMap<String, ByteIterator> values) {
  logger.debug("insertkey: " + primaryKeyName + "-" + key + " from table: " + table);
  Map<String, AttributeValue> attributes = createAttributes(values);
  // adding primary key
  attributes.put(primaryKeyName, new AttributeValue(key));
  PutItemRequest putItemRequest = new PutItemRequest(table, attributes);
  PutItemResult res = null;
  try {
    res = dynamoDB.putItem(putItemRequest);
  } catch (AmazonServiceException ex) {
    logger.error(ex.getMessage());
    return SERVER_ERROR;
  } catch (AmazonClientException ex) {
    logger.error(ex.getMessage());
    return CLIENT_ERROR;
  }
  return OK;
}
private static void moveToS3(String outputFilePath, String existingBucketName)
    throws InterruptedException {
  System.setProperty("aws.accessKeyId", AWS_ACCESS_KEY_ID);
  System.setProperty("aws.secretKey", AWS_ACCESS_SECRET_KEY);
  File outputFile = new File(outputFilePath);
  String keyName = outputFile.getName();
  TransferManager tm = new TransferManager(new SystemPropertiesCredentialsProvider());
  // TransferManager processes all transfers asynchronously,
  // so this call will return immediately.
  Upload upload = tm.upload(existingBucketName, keyName, new File(outputFilePath));
  try {
    // Or you can block and wait for the upload to finish
    upload.waitForCompletion();
    System.out.println("Upload complete.");
  } catch (AmazonClientException amazonClientException) {
    System.out.println("Unable to upload file, upload was aborted.");
    amazonClientException.printStackTrace();
  }
}
/** Check that the AMI requested is available in the cloud and can be used. */
public FormValidation doValidateAmi(
    @QueryParameter boolean useInstanceProfileForCredentials,
    @QueryParameter String credentialsId,
    @QueryParameter String ec2endpoint,
    @QueryParameter String region,
    final @QueryParameter String ami)
    throws IOException {
  AWSCredentialsProvider credentialsProvider =
      EC2Cloud.createCredentialsProvider(useInstanceProfileForCredentials, credentialsId);
  AmazonEC2 ec2;
  if (region != null) {
    ec2 = EC2Cloud.connect(credentialsProvider, AmazonEC2Cloud.getEc2EndpointUrl(region));
  } else {
    ec2 = EC2Cloud.connect(credentialsProvider, new URL(ec2endpoint));
  }
  if (ec2 != null) {
    try {
      List<String> images = new LinkedList<String>();
      images.add(ami);
      List<String> owners = new LinkedList<String>();
      List<String> users = new LinkedList<String>();
      DescribeImagesRequest request = new DescribeImagesRequest();
      request.setImageIds(images);
      request.setOwners(owners);
      request.setExecutableUsers(users);
      List<Image> img = ec2.describeImages(request).getImages();
      if (img == null || img.isEmpty()) {
        // A de-registered AMI causes an empty list to be returned,
        // so be defensive against other possibilities.
        return FormValidation.error("No such AMI, or not usable with this accessId: " + ami);
      }
      String ownerAlias = img.get(0).getImageOwnerAlias();
      return FormValidation.ok(
          img.get(0).getImageLocation() + (ownerAlias != null ? " by " + ownerAlias : ""));
    } catch (AmazonClientException e) {
      return FormValidation.error(e.getMessage());
    }
  } else {
    return FormValidation.ok(); // can't test
  }
}
public static String xs3_init_multi_upload(String xs3_objname, int file_size, String file_type) {
  AWSCredentials xs3_credentials = new BasicAWSCredentials(xs3_access_key, xs3_secret_key);
  ClientConfiguration xs3_clientconfig = new ClientConfiguration();
  xs3_clientconfig.setProtocol(Protocol.HTTP);
  S3ClientOptions xs3_client_options = new S3ClientOptions();
  xs3_client_options.setPathStyleAccess(true);
  xs3_client = new AmazonS3Client(xs3_credentials, xs3_clientconfig);
  xs3_client.setEndpoint(xs3_endpoint);
  xs3_client.setS3ClientOptions(xs3_client_options);
  try {
    InitiateMultipartUploadRequest xs3_multi_req =
        new InitiateMultipartUploadRequest(xs3_bucketname, xs3_objname);
    xs3_multi_req.setCannedACL(CannedAccessControlList.PublicRead);
    ObjectMetadata xs3_meta = new ObjectMetadata();
    xs3_meta.setContentType(file_type);
    xs3_multi_req.setObjectMetadata(xs3_meta);
    InitiateMultipartUploadResult xs3_multi_res =
        xs3_client.initiateMultipartUpload(xs3_multi_req);
    String xs3_multi_uploadid = xs3_multi_res.getUploadId();
    String json_urls = gen_part_url(xs3_multi_uploadid, file_size, xs3_objname, file_type);
    return json_urls;
  } catch (AmazonServiceException ase) {
    System.out.println("xs3_svr_error_message:" + ase.getMessage());
    System.out.println("xs3_svr_status_code: " + ase.getStatusCode());
    System.out.println("xs3_svr_error_code: " + ase.getErrorCode());
    System.out.println("xs3_svr_error_type: " + ase.getErrorType());
    System.out.println("xs3_svr_request_id: " + ase.getRequestId());
  } catch (AmazonClientException ace) {
    System.out.println("xs3_clt_error_message:" + ace.getMessage());
  }
  return null;
}