Example #1
  public static void createBucket(AmazonS3Client s3, String bucketName, String zone) {
    try {

      List<Bucket> bucketList = s3.listBuckets();

      for (Bucket bucket : bucketList) {
        // System.out.println(bucket.getName());
        if (bucketName.equalsIgnoreCase(bucket.getName())) {
          System.out.println("Using bucket " + bucketName);
          return;
        }
      }

      System.out.println("Created s3 bucket " + bucketName);
      s3.createBucket(bucketName);

      return;

    } catch (AmazonServiceException ase) {
      System.out.println("Caught Exception: " + ase.getMessage());
      System.out.println("Reponse Status Code: " + ase.getStatusCode());
      System.out.println("Error Code: " + ase.getErrorCode());
      System.out.println("Request ID: " + ase.getRequestId());
    }
  }
Example #2
 private S3Object getS3Object(final Path path, final long start) throws IOException {
   try {
     return retry()
         .maxAttempts(maxClientRetry)
         .exponentialBackoff(
             new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
         .stopOn(InterruptedException.class, UnrecoverableS3OperationException.class)
         .run(
             "getS3Object",
             () -> {
               try {
                 return s3.getObject(
                     new GetObjectRequest(host, keyFromPath(path))
                         .withRange(start, Long.MAX_VALUE));
               } catch (AmazonServiceException e) {
                 if (e.getStatusCode() == SC_FORBIDDEN) {
                   throw new UnrecoverableS3OperationException(e);
                 }
                 throw Throwables.propagate(e);
               }
             });
   } catch (InterruptedException e) {
     Thread.currentThread().interrupt();
     throw Throwables.propagate(e);
   } catch (Exception e) {
     Throwables.propagateIfInstanceOf(e, IOException.class);
     throw Throwables.propagate(e);
   }
 }
 private void updateItemVersionedInternal(
     String tableName,
     Key key,
     Map<String, AttributeValueUpdate> attributes,
     String expectedVersion,
     PersistentEntity persistentEntity,
     int attempt)
     throws DataAccessException {
   UpdateItemRequest request =
       new UpdateItemRequest(tableName, key, attributes)
           .withExpected(getOptimisticVersionCondition(expectedVersion));
   try {
     ddb.updateItem(request);
   } catch (AmazonServiceException e) {
     if (DynamoDBUtil.AWS_ERR_CODE_CONDITIONAL_CHECK_FAILED.equals(e.getErrorCode())) {
       throw new OptimisticLockingException(persistentEntity, key);
     } else if (DynamoDBUtil.AWS_ERR_CODE_RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
       throw new IllegalArgumentException("no such table: " + tableName, e);
     } else if (DynamoDBUtil.AWS_STATUS_CODE_SERVICE_UNAVAILABLE == e.getStatusCode()) {
       // retry after a small pause
       DynamoDBUtil.sleepBeforeRetry(attempt);
       attempt++;
       updateItemVersionedInternal(
           tableName, key, attributes, expectedVersion, persistentEntity, attempt);
     } else {
       throw new DataStoreOperationException(
           "problem with table: " + tableName + ", key: " + key + ", attributes: " + attributes,
           e);
     }
   }
 }
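
DynamoDBUtil.sleepBeforeRetry is called throughout these DynamoDB examples but never shown. A minimal sketch of such a pause, assuming a simple capped exponential backoff (the 50 ms base and 10 s cap are assumptions, not taken from DynamoDBUtil):

  // Hypothetical sketch of the backoff pause used by the DynamoDB examples;
  // the base delay and the cap are assumptions.
  static void sleepBeforeRetry(int attempt) {
    long delayMillis = Math.min((long) (50L * Math.pow(2, attempt)), 10000L);
    try {
      Thread.sleep(delayMillis);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }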
Example #4
  private static void createKey(String keyName, AmazonEC2 ec2) {
    try {
      List<KeyPairInfo> keyPairList = ec2.describeKeyPairs().getKeyPairs();
      for (KeyPairInfo keyPair : keyPairList) {
        if (keyName.equalsIgnoreCase(keyPair.getKeyName())) {
          System.out.println("Using key " + keyName);
          return;
        }
      }
      System.out.println("Creating key " + keyName + "in local directory");
      CreateKeyPairRequest newKeyRequest = new CreateKeyPairRequest();
      newKeyRequest.setKeyName(keyName);
      CreateKeyPairResult keyresult = ec2.createKeyPair(newKeyRequest);
      KeyPair keyPair = keyresult.getKeyPair();
      String privateKey = keyPair.getKeyMaterial();
      writeKeytoFile(keyName, privateKey);

    } catch (AmazonServiceException ase) {
      System.out.println("Caught Exception: " + ase.getMessage());
      System.out.println("Reponse Status Code: " + ase.getStatusCode());
      System.out.println("Error Code: " + ase.getErrorCode());
      System.out.println("Request ID: " + ase.getRequestId());
    }
  }
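
writeKeytoFile is called above but not shown. A hedged sketch of what such a helper might do, assuming the key material is simply written to a "<keyName>.pem" file in the working directory:

  // Hypothetical helper: persist the PEM-encoded private key returned by EC2.
  // The file name and location are assumptions.
  private static void writeKeytoFile(String keyName, String privateKey) {
    File pemFile = new File(keyName + ".pem");
    try (FileWriter writer = new FileWriter(pemFile)) {
      writer.write(privateKey);
      System.out.println("Wrote private key to " + pemFile.getAbsolutePath());
    } catch (IOException e) {
      System.out.println("Could not write key file: " + e.getMessage());
    }
  }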
  private static void basicAbortMPU() throws IOException {
    System.out.println("basic abort MPU");
    String bucketName = "chttest";
    String fileName = "hello.txt";
    // String uploadID = "XHGTFV4F5XTEAC5O8N3LK12TIY3DSY7OFPXIWTHRMNTE7A3WB5M8N2U5AN"; //hi
    String uploadID = "LE5JS2K6C208JU7ZX1QD2TVRWXOWWF4VNG7LE7TFIX5SYNG4HLOGW9CLAD"; // hello

    AbortMultipartUploadRequest request =
        new AbortMultipartUploadRequest(bucketName, fileName, uploadID);

    AmazonS3 s3 =
        new AmazonS3Client(
            new PropertiesCredentials(
                putBucket.class.getResourceAsStream("AwsCredentials.properties")));
    try {
      s3.abortMultipartUpload(request);
      System.out.println("Aborted multipart upload " + uploadID);
    } catch (AmazonServiceException ase) {
      System.out.println(
          "Caught an AmazonServiceException, which means your request made it "
              + "to Amazon S3, but was rejected with an error response for some reason.");
      System.out.println("Error Message:    " + ase.getMessage());
      System.out.println("HTTP Status Code: " + ase.getStatusCode());
      System.out.println("AWS Error Code:   " + ase.getErrorCode());
      System.out.println("Error Type:       " + ase.getErrorType());
      System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println(
          "Caught an AmazonClientException, which means the client encountered "
              + "a serious internal problem while trying to communicate with S3, "
              + "such as not being able to access the network.");
      System.out.println("Error Message: " + ace.getMessage());
    }
  }
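
The upload ID above is hard-coded; a hedged sketch of how the in-progress uploads of a bucket could be listed with the same SDK to look an ID up instead (client setup as in the example above):

  // Sketch: list in-progress multipart uploads rather than hard-coding an upload ID.
  private static void listInProgressUploads(AmazonS3 s3, String bucketName) {
    MultipartUploadListing listing =
        s3.listMultipartUploads(new ListMultipartUploadsRequest(bucketName));
    for (MultipartUpload upload : listing.getMultipartUploads()) {
      System.out.println(upload.getKey() + " -> " + upload.getUploadId());
    }
  }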
 private Map<String, AttributeValue> getConsistentInternal(
     String tableName, Key key, int attempt) {
   GetItemRequest request = new GetItemRequest(tableName, key);
   request.setConsistentRead(true);
   try {
     GetItemResult result = ddb.getItem(request);
     Map<String, AttributeValue> attributes = result.getItem();
     if (attributes == null || attributes.isEmpty()) {
       return null;
     }
     return attributes;
   } catch (AmazonServiceException e) {
     if (DynamoDBUtil.AWS_ERR_CODE_RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
       throw new IllegalArgumentException("no such table: " + tableName, e);
     } else if (DynamoDBUtil.AWS_STATUS_CODE_SERVICE_UNAVAILABLE == e.getStatusCode()) {
       // retry after a small pause
       DynamoDBUtil.sleepBeforeRetry(attempt);
       attempt++;
       return getConsistentInternal(tableName, key, attempt);
     } else {
       throw new DataStoreOperationException(
           "problem with table: " + tableName + ", key: " + key, e);
     }
   }
 }
  /**
   * Returns true if a failed request should be retried.
   *
   * @param method The current HTTP method being executed.
   * @param exception The exception from the failed request.
   * @param retries The number of times the current request has been attempted.
   * @return True if the failed request should be retried.
   */
  private boolean shouldRetry(HttpRequestBase method, Exception exception, int retries) {
    if (retries >= config.getMaxErrorRetry()) return false;

    if (method instanceof HttpEntityEnclosingRequest) {
      HttpEntity entity = ((HttpEntityEnclosingRequest) method).getEntity();
      if (entity != null && !entity.isRepeatable()) {
        if (log.isDebugEnabled()) {
          log.debug("Entity not repeatable");
        }
        return false;
      }
    }

    if (exception instanceof IOException) {
      if (log.isDebugEnabled()) {
        log.debug("Retrying on " + exception.getClass().getName() + ": " + exception.getMessage());
      }
      return true;
    }

    if (exception instanceof AmazonServiceException) {
      AmazonServiceException ase = (AmazonServiceException) exception;

      /*
       * For 500 internal server errors and 503 service
       * unavailable errors, we want to retry, but we need to use
       * an exponential back-off strategy so that we don't overload
       * a server with a flood of retries. If we've surpassed our
       * retry limit we handle the error response as a non-retryable
       * error and go ahead and throw it back to the user as an exception.
       */
      if (ase.getStatusCode() == HttpStatus.SC_INTERNAL_SERVER_ERROR
          || ase.getStatusCode() == HttpStatus.SC_SERVICE_UNAVAILABLE) {
        return true;
      }

      /*
       * Throttling is reported as a 400 error from newer services. To try
       * and smooth out an occasional throttling error, we'll pause and
       * retry, hoping that the pause is long enough for the request to
       * get through the next time.
       */
      if (isThrottlingException(ase)) return true;
    }

    return false;
  }
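
isThrottlingException is used above without being shown. A hedged sketch based on the error codes AWS services commonly use to report throttling (the exact code list is an assumption):

  // Sketch: throttling check assumed by shouldRetry above; the set of error codes
  // is an assumption based on common AWS throttling responses.
  private boolean isThrottlingException(AmazonServiceException ase) {
    String errorCode = ase.getErrorCode();
    return "Throttling".equals(errorCode)
        || "ThrottlingException".equals(errorCode)
        || "ProvisionedThroughputExceededException".equals(errorCode);
  }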
    public void storeMessageInQueue(String message) {
      // System.out.println("Storing Message...");

      // Send a message

      if (driectDbConnection) {
        DbConnector db = null;
        try {
          db = dbf.createDBConnectorInstance();
        } catch (MailAppDBException e) {
          // TODO Auto-generated catch block
          e.printStackTrace();
        }

        MailAppMessage mailMsg =
            new MailAppMessage(message, "" + System.currentTimeMillis() + "_" + UUID.randomUUID());
        try {
          // printDebug("Calling DB-Instance to store message "+mailMsg.getTimeuuid()+" length:
          // "+mailMsg.getSize()+"\n"+mailMsg.getWholeMessageWITHOUT2LINES());
          db.storeMessage(mailMsg);

        } catch (MailAppDBException mae) {
          // MailStoringWorkerStarter.stopMailStoringWorker(MailStoringWorkerStarter.getWorkerList(), this);
          // stop=true;
          mae.printStackTrace();
        }

      } else {

        try {
          sqs.sendMessage(new SendMessageRequest(mailQueueUrl, message));
        } catch (AmazonServiceException ase) {
          System.out.println(
              "Caught an AmazonServiceException, which means your request made it "
                  + "to Amazon SQS, but was rejected with an error response for some reason.");
          System.out.println("Error Message:    " + ase.getMessage());
          System.out.println("HTTP Status Code: " + ase.getStatusCode());
          System.out.println("AWS Error Code:   " + ase.getErrorCode());
          System.out.println("Error Type:       " + ase.getErrorType());
          System.out.println("Request ID:       " + ase.getRequestId());
        } catch (AmazonClientException ace) {
          System.out.println(
              "Caught an AmazonClientException, which means the client encountered "
                  + "a serious internal problem while trying to communicate with SQS, such as not "
                  + "being able to access the network.");
          System.out.println("Error Message: " + ace.getMessage());
        }

        printDebug("Message stored to " + mailQueueUrl);

        // connect to db ( Cassandra ) and store message
        // OR write Message to SQS to be consumed by Storage Worker ->
        // better? more scalable, db independent

      }
    }
Example #9
 public void create(String store) throws IBlobStore.Error {
   try {
     s3client.createBucket(store);
   } catch (AmazonServiceException e) {
     if (e.getStatusCode() == 403) throw new IBlobStore.AuthError("" + e);
     throw new IBlobStore.IOError("" + e);
   } catch (AmazonClientException e) {
     throw new IBlobStore.IOError("" + e);
   }
 }
 private void logException(AmazonServiceException ase) {
   logger.error(
       "AmazonServiceException: error={}, statuscode={}, "
           + "awserrcode={}, errtype={}, reqid={}",
       ase.getMessage(),
       ase.getStatusCode(),
       ase.getErrorCode(),
       ase.getErrorType(),
       ase.getRequestId());
 }
    @Override
    public boolean shouldRetry(
        AmazonWebServiceRequest originalRequest,
        AmazonClientException exception,
        int retriesAttempted) {
      // Always retry on client exceptions caused by IOException
      if (exception.getCause() instanceof IOException) return true;

      // Only retry on a subset of service exceptions
      if (exception instanceof AmazonServiceException) {
        AmazonServiceException ase = (AmazonServiceException) exception;

        /*
         * For 500 internal server errors and 503 service
         * unavailable errors, we want to retry, but we need to use
         * an exponential back-off strategy so that we don't overload
         * a server with a flood of retries.
         */
        if (ase.getStatusCode() == HttpStatus.SC_INTERNAL_SERVER_ERROR
            || ase.getStatusCode() == HttpStatus.SC_SERVICE_UNAVAILABLE) {
          return true;
        }

        /*
         * Throttling is reported as a 400 error from newer services. To try
         * and smooth out an occasional throttling error, we'll pause and
         * retry, hoping that the pause is long enough for the request to
         * get through the next time.
         */
        if (RetryUtils.isThrottlingException(ase)) return true;

        /*
         * Clock skew exception. If it is then we will get the time offset
         * between the device time and the server time to set the clock skew
         * and then retry the request.
         */
        if (RetryUtils.isClockSkewError(ase)) return true;
      }

      return false;
    }
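
A retry condition like the one above only takes effect once it is installed on a client. A hedged sketch of how it could be wired in through ClientConfiguration and the SDK's RetryPolicy, assuming the shouldRetry override above lives in a class named CustomRetryCondition (the backoff strategy and retry count shown are assumptions):

    // Sketch: install a custom RetryCondition on a client. CustomRetryCondition is the
    // assumed name of the class containing the shouldRetry override above.
    RetryPolicy retryPolicy =
        new RetryPolicy(
            new CustomRetryCondition(),
            PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, // assumed backoff
            5, // max error retries (assumption)
            true);
    ClientConfiguration clientConfiguration =
        new ClientConfiguration().withRetryPolicy(retryPolicy);
    AmazonS3 s3 =
        new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider(), clientConfiguration);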
Example #12
  public boolean uploadToS3(String fileName, String text) throws IOException {

    try {
      String folderName = this.p.getProperty("KEY_NAME");
      String bucketName = this.p.getProperty("AWS_BUCKET_NAME");
      logger.info(LogKey.MESSAGE, "Uploading a new object to S3 from a file");

      byte[] contentAsBytes = text.getBytes("UTF-8");
      ByteArrayInputStream contentsAsStream = new ByteArrayInputStream(contentAsBytes);
      ObjectMetadata md = new ObjectMetadata();
      md.setContentLength(contentAsBytes.length);

      if (this.s3client == null) {
        logger.debug(LogKey.MESSAGE, "no S3 client yet, creating a new one");
        this.s3client = this.getS3Client();
      }

      logger.info(LogKey.BUCKET_NAME, bucketName);
      logger.info(LogKey.KEY_NAME, folderName);
      logger.info(LogKey.FILE_NAME, fileName);

      this.s3client.putObject(
          new PutObjectRequest(
              bucketName, folderName + "/" + fileName, contentsAsStream, md));

      return true;

    } catch (AmazonServiceException ase) {
      logger.error(LogKey.EXCEPTION, ase);
      logger.error(
          LogKey.EXCEPTION,
          "Caught an AmazonServiceException, which "
              + "means your request made it "
              + "to Amazon S3, but was rejected with an error response"
              + " for some reason.");
      logger.error(LogKey.EXCEPTION, "Error Message:    " + ase.getMessage());
      logger.error(LogKey.EXCEPTION, "HTTP Status Code: " + ase.getStatusCode());
      logger.error(LogKey.EXCEPTION, "AWS Error Code:   " + ase.getErrorCode());
      logger.error(LogKey.EXCEPTION, "Error Type:       " + ase.getErrorType());
      logger.error(LogKey.EXCEPTION, "Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      logger.error(
          LogKey.EXCEPTION,
          "Caught an AmazonClientException, which "
              + "means the client encountered "
              + "an internal error while trying to "
              + "communicate with S3, "
              + "such as not being able to access the network.");
      logger.error(LogKey.EXCEPTION, "Error Message: " + ace.getMessage());
    }
    return false;
  }
Example #13
 public boolean storeExists(String store) throws IBlobStore.Error {
   try {
     s3client.listObjects(store);
     return true;
   } catch (final AmazonServiceException e) {
     if (e.getStatusCode() == 404) return false;
     if (e.getStatusCode() == 403) throw new IBlobStore.AuthError("" + e);
     throw new IBlobStore.IOError("" + e);
   } catch (AmazonClientException e) {
     Log.v(TAG, "Error: " + e);
     throw new IBlobStore.IOError("" + e);
   }
 }
  /** Verifies the RetryCondition has collected the expected context information. */
  public static void verifyExpectedContextData(
      ContextDataCollection contextDataCollection,
      AmazonWebServiceRequest failedRequest,
      AmazonClientException expectedException,
      int expectedRetries) {

    Assert.assertEquals(expectedRetries, contextDataCollection.failedRequests.size());
    Assert.assertEquals(expectedRetries, contextDataCollection.exceptions.size());
    Assert.assertEquals(expectedRetries, contextDataCollection.retriesAttemptedValues.size());

    if (expectedRetries > 0) {
      // It should keep getting the same original request instance
      for (AmazonWebServiceRequest seenRequest : contextDataCollection.failedRequests) {
        Assert.assertTrue(seenRequest == failedRequest);
      }

      // Verify the exceptions
      if (expectedException instanceof AmazonServiceException) {
        // It should get service exceptions with the expected error and
        // status code
        AmazonServiceException ase = (AmazonServiceException) expectedException;
        for (AmazonClientException seenException : contextDataCollection.exceptions) {
          Assert.assertTrue(seenException instanceof AmazonServiceException);
          Assert.assertEquals(
              ase.getErrorCode(), ((AmazonServiceException) seenException).getErrorCode());
          Assert.assertEquals(
              ase.getStatusCode(), ((AmazonServiceException) seenException).getStatusCode());
        }
      } else {
        // Client exceptions should have the same expected cause (the
        // same
        // throwable instance from the mock HttpClient).
        Throwable expectedCause = expectedException.getCause();
        for (AmazonClientException seenException : contextDataCollection.exceptions) {
          Assert.assertTrue(expectedCause == seenException.getCause());
        }
      }

      // It should get "retriesAttempted" values starting from 0
      int expectedRetriesAttempted = 0;
      for (int seenRetriesAttempted : contextDataCollection.retriesAttemptedValues) {
        Assert.assertEquals(expectedRetriesAttempted++, seenRetriesAttempted);
      }
    }
  }
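
ContextDataCollection is exercised by the helper above but not defined here. A hedged sketch of a recording RetryCondition that would produce that context data (the field names follow the helper's usage; everything else is an assumption):

  // Sketch: a RetryCondition that records every retry decision so a test can inspect it.
  public static class ContextDataCollection implements RetryPolicy.RetryCondition {
    final List<AmazonWebServiceRequest> failedRequests = new ArrayList<AmazonWebServiceRequest>();
    final List<AmazonClientException> exceptions = new ArrayList<AmazonClientException>();
    final List<Integer> retriesAttemptedValues = new ArrayList<Integer>();

    @Override
    public boolean shouldRetry(
        AmazonWebServiceRequest originalRequest,
        AmazonClientException exception,
        int retriesAttempted) {
      failedRequests.add(originalRequest);
      exceptions.add(exception);
      retriesAttemptedValues.add(retriesAttempted);
      return true; // always retry in tests; the policy's maxErrorRetry bounds the attempts
    }
  }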
 private void deleteItemInternal(String tableName, Key key, int attempt) {
   DeleteItemRequest request = new DeleteItemRequest(tableName, key);
   try {
     ddb.deleteItem(request);
   } catch (AmazonServiceException e) {
     if (DynamoDBUtil.AWS_ERR_CODE_RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
       throw new IllegalArgumentException("no such table: " + tableName, e);
     } else if (DynamoDBUtil.AWS_STATUS_CODE_SERVICE_UNAVAILABLE == e.getStatusCode()) {
       // retry after a small pause
       DynamoDBUtil.sleepBeforeRetry(attempt);
       attempt++;
       deleteItemInternal(tableName, key, attempt);
     } else {
       throw new DataStoreOperationException(
           "problem with table: " + tableName + ", key: " + key, e);
     }
   }
 }
Example #16
  @Inject
  public AmazonS3Storage(Configuration configuration) {
    bucketName = configuration.getString("storage.s3.bucket");

    String accessKey = configuration.getString("storage.s3.accesskey");
    String secretKey = configuration.getString("storage.s3.secretkey");
    credentials = new BasicAWSCredentials(accessKey, secretKey);

    AmazonS3 amazonS3 = new AmazonS3Client(credentials);

    try {
      if (!(amazonS3.doesBucketExist(bucketName))) {
        amazonS3.createBucket(new CreateBucketRequest(bucketName));
      }

      String bucketLocation = amazonS3.getBucketLocation(new GetBucketLocationRequest(bucketName));
      Logger.info("Amazon S3 bucket created at " + bucketLocation);
    } catch (AmazonServiceException ase) {
      Logger.error(
          "Caught an AmazonServiceException, which "
              + "means your request made it "
              + "to Amazon S3, but was rejected with an error response "
              + "for some reason."
              + " Error Message: "
              + ase.getMessage()
              + " HTTP Status Code: "
              + ase.getStatusCode()
              + " AWS Error Code: "
              + ase.getErrorCode()
              + " Error Type: "
              + ase.getErrorType()
              + " Request ID: "
              + ase.getRequestId());
    } catch (AmazonClientException ace) {
      Logger.error(
          "Caught an AmazonClientException, which "
              + "means the client encountered "
              + "an internal error while trying to "
              + "communicate with S3, "
              + "such as not being able to access the network."
              + " Error Message: "
              + ace.getMessage());
    }
  }
Example #17
  public static String xs3_generate_url(String xs3_objname, String content_type) {
    AWSCredentials xs3_credentials = new BasicAWSCredentials(xs3_access_key, xs3_secret_key);
    ClientConfiguration xs3_clientconfig = new ClientConfiguration();
    xs3_clientconfig.setProtocol(Protocol.HTTP);

    S3ClientOptions xs3_client_options = new S3ClientOptions();
    xs3_client_options.setPathStyleAccess(true);

    xs3_client = new AmazonS3Client(xs3_credentials, xs3_clientconfig);
    xs3_client.setEndpoint(xs3_endpoint);
    xs3_client.setS3ClientOptions(xs3_client_options);

    try {
      java.util.Date expiration = new java.util.Date();

      long milliSeconds = expiration.getTime();
      milliSeconds += 1000 * 60 * 5;
      expiration.setTime(milliSeconds);

      GeneratePresignedUrlRequest xs3_genurl_req =
          new GeneratePresignedUrlRequest(xs3_bucketname, xs3_objname);
      xs3_genurl_req.setMethod(HttpMethod.PUT);
      xs3_genurl_req.setExpiration(expiration);
      xs3_genurl_req.setContentType(content_type);
      xs3_genurl_req.addRequestParameter("x-amz-acl", "public-read");

      URL url = xs3_client.generatePresignedUrl(xs3_genurl_req);

      System.out.println(url.toString());
      return url.toString();

    } catch (AmazonServiceException ase) {
      System.out.println("xs3_svr_error_message:" + ase.getMessage());
      System.out.println("xs3_svr_status_code:  " + ase.getStatusCode());
      System.out.println("xs3_svr_error_code:   " + ase.getErrorCode());
      System.out.println("xs3_svr_error_type:   " + ase.getErrorType());
      System.out.println("xs3_svr_request_id:   " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println("xs3_clt_error_message:" + ace.getMessage());
    }

    return null;
  }
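
The presigned PUT URL returned above is only useful once a client uploads through it. A hedged sketch of such an upload with plain HttpURLConnection; the Content-Type sent must match the one that was signed:

  // Sketch: upload data through a presigned PUT URL produced by xs3_generate_url.
  public static int xs3_upload_via_url(String presigned_url, byte[] data, String content_type)
      throws IOException {
    HttpURLConnection conn = (HttpURLConnection) new URL(presigned_url).openConnection();
    conn.setDoOutput(true);
    conn.setRequestMethod("PUT");
    // Must match the content type used when the URL was generated, or S3 rejects the signature.
    conn.setRequestProperty("Content-Type", content_type);
    OutputStream out = conn.getOutputStream();
    out.write(data);
    out.close();
    return conn.getResponseCode(); // 200 on success
  }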
  private List<Map<String, AttributeValue>> scanInternal(
      String tableName, Map<String, Condition> filter, int max, int attempt) {
    LinkedList<Map<String, AttributeValue>> items = new LinkedList<Map<String, AttributeValue>>();
    try {
      ScanRequest request = new ScanRequest(tableName).withScanFilter(filter);
      ScanResult result = ddb.scan(request);
      items.addAll(result.getItems());

      // keep repeating until we get through all matched items
      Key lastKeyEvaluated = null;
      do {
        lastKeyEvaluated = result.getLastEvaluatedKey();
        if (lastKeyEvaluated != null) {
          request =
              new ScanRequest(tableName)
                  .withScanFilter(filter)
                  .withExclusiveStartKey(lastKeyEvaluated);
          result = ddb.scan(request);
          items.addAll(result.getItems());
        }
      } while (lastKeyEvaluated != null && items.size() < max);

      // truncate if needed
      while (items.size() > max) {
        items.removeLast();
      }

      return items;
    } catch (AmazonServiceException e) {
      if (DynamoDBUtil.AWS_ERR_CODE_RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
        throw new IllegalArgumentException("no such table: " + tableName, e);
      } else if (DynamoDBUtil.AWS_STATUS_CODE_SERVICE_UNAVAILABLE == e.getStatusCode()) {
        // retry after a small pause
        DynamoDBUtil.sleepBeforeRetry(attempt);
        attempt++;
        return scanInternal(tableName, filter, max, attempt);
      } else {
        throw new DataStoreOperationException(
            "problem with table: " + tableName + ", filter: " + filter, e);
      }
    }
  }
 private void putItemInternal(
     String tableName, Map<String, AttributeValue> attributes, int attempt)
     throws DataAccessException {
   try {
     PutItemRequest request = new PutItemRequest(tableName, attributes);
     ddb.putItem(request);
   } catch (AmazonServiceException e) {
     if (DynamoDBUtil.AWS_ERR_CODE_RESOURCE_NOT_FOUND.equals(e.getErrorCode())) {
       throw new IllegalArgumentException("no such table: " + tableName, e);
     } else if (DynamoDBUtil.AWS_STATUS_CODE_SERVICE_UNAVAILABLE == e.getStatusCode()) {
       // retry after a small pause
       DynamoDBUtil.sleepBeforeRetry(attempt);
       attempt++;
       putItemInternal(tableName, attributes, attempt);
     } else {
       throw new DataStoreOperationException(
           "problem with table: " + tableName + ", attributes: " + attributes, e);
     }
   }
 }
Example #20
  public void put(String store, String name, ByteBuffer data) throws IBlobStore.Error {
    InputStream is;
    ObjectMetadata om;

    try {
      is = new ByteArrayInputStream(data.array(), 0, data.limit());
      om = new ObjectMetadata();
      om.setContentLength(data.limit());

      Log.v(TAG, "Content Length:" + data.limit());
      om.setContentType("plain/text");

      s3client.putObject(store, name, is, om);
    } catch (AmazonServiceException e) {
      if (e.getStatusCode() == 403) throw new IBlobStore.AuthError("" + e);
      throw new IBlobStore.IOError("" + e);
    } catch (AmazonClientException e) {
      throw new IBlobStore.IOError("" + e);
    }
  }
Example #21
  public static String xs3_init_multi_upload(String xs3_objname, int file_size, String file_type) {
    AWSCredentials xs3_credentials = new BasicAWSCredentials(xs3_access_key, xs3_secret_key);
    ClientConfiguration xs3_clientconfig = new ClientConfiguration();
    xs3_clientconfig.setProtocol(Protocol.HTTP);

    S3ClientOptions xs3_client_options = new S3ClientOptions();
    xs3_client_options.setPathStyleAccess(true);

    xs3_client = new AmazonS3Client(xs3_credentials, xs3_clientconfig);
    xs3_client.setEndpoint(xs3_endpoint);
    xs3_client.setS3ClientOptions(xs3_client_options);

    try {
      InitiateMultipartUploadRequest xs3_multi_req =
          new InitiateMultipartUploadRequest(xs3_bucketname, xs3_objname);
      xs3_multi_req.setCannedACL(CannedAccessControlList.PublicRead);
      ObjectMetadata xs3_meta = new ObjectMetadata();
      xs3_meta.setContentType(file_type);
      xs3_multi_req.setObjectMetadata(xs3_meta);

      InitiateMultipartUploadResult xs3_multi_res =
          xs3_client.initiateMultipartUpload(xs3_multi_req);

      String xs3_multi_uploadid = xs3_multi_res.getUploadId();

      String json_urls = gen_part_url(xs3_multi_uploadid, file_size, xs3_objname, file_type);
      return json_urls;

    } catch (AmazonServiceException ase) {
      System.out.println("xs3_svr_error_message:" + ase.getMessage());
      System.out.println("xs3_svr_status_code:  " + ase.getStatusCode());
      System.out.println("xs3_svr_error_code:   " + ase.getErrorCode());
      System.out.println("xs3_svr_error_type:   " + ase.getErrorType());
      System.out.println("xs3_svr_request_id:   " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println("xs3_clt_error_message:" + ace.getMessage());
    }
    return null;
  }
Example #22
  public static void xs3_coplete_multi_upload(String xs3_objname, String uploadId) {
    AWSCredentials xs3_credentials = new BasicAWSCredentials(xs3_access_key, xs3_secret_key);
    ClientConfiguration xs3_clientconfig = new ClientConfiguration();
    xs3_clientconfig.setProtocol(Protocol.HTTP);

    S3ClientOptions xs3_client_options = new S3ClientOptions();
    xs3_client_options.setPathStyleAccess(true);

    xs3_client = new AmazonS3Client(xs3_credentials, xs3_clientconfig);
    xs3_client.setEndpoint(xs3_endpoint);
    xs3_client.setS3ClientOptions(xs3_client_options);

    try {
      List<PartETag> rest_parts = listPartsXml(xs3_objname, uploadId);
      if (null == rest_parts) {
        return;
      }
      for (PartETag item : rest_parts) {
        System.out.println(item.getETag() + " -> " + item.getPartNumber());
      }

      CompleteMultipartUploadRequest comp_req =
          new CompleteMultipartUploadRequest(xs3_bucketname, xs3_objname, uploadId, rest_parts);

      CompleteMultipartUploadResult comp_result = xs3_client.completeMultipartUpload(comp_req);

      System.out.println(comp_result.getETag());
      System.out.println(comp_result.getKey());
    } catch (AmazonServiceException ase) {
      System.out.println("xs3_svr_error_message:" + ase.getMessage());
      System.out.println("xs3_svr_status_code:  " + ase.getStatusCode());
      System.out.println("xs3_svr_error_code:   " + ase.getErrorCode());
      System.out.println("xs3_svr_error_type:   " + ase.getErrorType());
      System.out.println("xs3_svr_request_id:   " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println("xs3_clt_error_message:" + ace.getMessage());
    }
  }
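
listPartsXml is called above but not shown; judging by its name, the real helper parses the ListParts XML itself. A hedged sketch that collects the same information through the SDK's listParts call and converts it to the PartETag list the completion request needs (pagination of large part lists is omitted):

  // Sketch: gather ETags of the already-uploaded parts for CompleteMultipartUploadRequest.
  public static List<PartETag> listPartsXml(String xs3_objname, String uploadId) {
    ListPartsRequest list_req = new ListPartsRequest(xs3_bucketname, xs3_objname, uploadId);
    PartListing listing = xs3_client.listParts(list_req);
    List<PartETag> part_tags = new ArrayList<PartETag>();
    for (PartSummary part : listing.getParts()) {
      part_tags.add(new PartETag(part.getPartNumber(), part.getETag()));
    }
    return part_tags;
  }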
Example #23
  public ByteBuffer get(String store, String name) throws IBlobStore.Error {
    try {
      S3Object result = s3client.getObject(store, name);
      long length = result.getObjectMetadata().getContentLength();
      S3ObjectInputStream is = result.getObjectContent();

      Log.v(TAG, " length: " + length);
      if (length > 1024 * 1024) throw new IBlobStore.IOError("data is too big");
      byte[] buf = new byte[(int) length];
      // read() may return fewer bytes than requested, so loop until the buffer is full
      int off = 0;
      while (off < buf.length) {
        int n = is.read(buf, off, buf.length - off);
        if (n < 0) throw new IBlobStore.IOError("unexpected end of stream");
        off += n;
      }
      is.close();

      return ByteBuffer.wrap(buf);
    } catch (IOException e) {
      throw new IBlobStore.IOError("" + e);
    } catch (AmazonServiceException e) {
      if (e.getStatusCode() == 403) throw new IBlobStore.AuthError("" + e);
      if (e.getStatusCode() == 404) throw new IBlobStore.NotFoundError("" + e);
      throw new IBlobStore.IOError("" + e);
    } catch (AmazonClientException e) {
      throw new IBlobStore.IOError("" + e);
    }
  }
  /**
   * @param args
   * @throws IOException
   */
  public static void main(String[] args) throws IOException {
    try {

      String myQueueUrl = "https://queue.amazonaws.com/034307772076/MyQueue";

      AmazonSQS sqs =
          new AmazonSQSClient(
              new PropertiesCredentials(
                  SimpleQueueServiceSample.class.getResourceAsStream("AwsCredentials.properties")));

      System.out.println("Sending message 1.");
      sqs.sendMessage(new SendMessageRequest(myQueueUrl, "Test message."));

      System.out.println("Receiving messages:");
      ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(myQueueUrl);
      List<Message> messages = sqs.receiveMessage(receiveMessageRequest).getMessages();
      for (Message message : messages) {
        System.out.print(message.getMessageId());
        System.out.println(" : " + message.getBody());
      }
    } catch (AmazonServiceException ase) {
      System.out.println(
          "Caught an AmazonServiceException, which means your request made it "
              + "to Amazon SQS, but was rejected with an error response for some reason.");
      System.out.println("Error Message:    " + ase.getMessage());
      System.out.println("HTTP Status Code: " + ase.getStatusCode());
      System.out.println("AWS Error Code:   " + ase.getErrorCode());
      System.out.println("Error Type:       " + ase.getErrorType());
      System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println(
          "Caught an AmazonClientException, which means the client encountered "
              + "a serious internal problem while trying to communicate with SQS, such as not "
              + "being able to access the network.");
      System.out.println("Error Message: " + ace.getMessage());
    }
  }
Example #25
  public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
      /*
       * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
       * so once a bucket name has been taken by any user, you can't create
       * another bucket with that same name.
       *
       * You can optionally specify a location for your bucket if you want to
       * keep your data closer to your applications or users.
       */
      System.out.println("Creating bucket " + bucketName + "\n");
      s3.createBucket(bucketName);

      /*
       * List the buckets in your account
       */
      System.out.println("Listing buckets");
      for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
      }
      System.out.println();

      /*
       * Upload an object to your bucket - You can easily upload a file to
       * S3, or upload an InputStream directly if you know the length of
       * the data in the stream. You can also specify your own metadata
       * when uploading to S3, which allows you set a variety of options
       * like content-type and content-encoding, plus additional metadata
       * specific to your applications.
       */
      System.out.println("Uploading a new object to S3 from a file\n");
      s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

      /*
       * Download an object - When you download an object, you get all of
       * the object's metadata and a stream from which to read the contents.
       * It's important to read the contents of the stream as quickly as
       * possible since the data is streamed directly from Amazon S3 and your
       * network connection will remain open until you read all the data or
       * close the input stream.
       *
       * GetObjectRequest also supports several other options, including
       * conditional downloading of objects based on modification times,
       * ETags, and selectively downloading a range of an object.
       */
      System.out.println("Downloading an object");
      S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
      System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
      displayTextInputStream(object.getObjectContent());

      /*
       * List objects in your bucket by prefix - There are many options for
       * listing the objects in your bucket.  Keep in mind that buckets with
       * many objects might truncate their results when listing their objects,
       * so be sure to check if the returned object listing is truncated, and
       * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
       * additional results.
       */
      System.out.println("Listing objects");
      ObjectListing objectListing =
          s3.listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
      for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(
            " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
      }
      System.out.println();

      /*
       * Delete an object - Unless versioning has been turned on for your bucket,
       * there is no way to undelete an object, so use caution when deleting objects.
       */
      System.out.println("Deleting an object\n");
      s3.deleteObject(bucketName, key);

      /*
       * Delete a bucket - A bucket must be completely empty before it can be
       * deleted, so remember to delete any objects from your buckets before
       * you try to delete them.
       */
      System.out.println("Deleting bucket " + bucketName + "\n");
      s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
      System.out.println(
          "Caught an AmazonServiceException, which means your request made it "
              + "to Amazon S3, but was rejected with an error response for some reason.");
      System.out.println("Error Message:    " + ase.getMessage());
      System.out.println("HTTP Status Code: " + ase.getStatusCode());
      System.out.println("AWS Error Code:   " + ase.getErrorCode());
      System.out.println("Error Type:       " + ase.getErrorType());
      System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println(
          "Caught an AmazonClientException, which means the client encountered "
              + "a serious internal problem while trying to communicate with S3, "
              + "such as not being able to access the network.");
      System.out.println("Error Message: " + ace.getMessage());
    }
  }
Example #26
  public static void main(String[] args) throws Exception {
    System.out.println("===========================================");
    System.out.println("Welcome to the AWS Java SDK!");
    System.out.println("===========================================");

    init();

    try {
      /*
       * The Amazon EC2 client allows you to easily launch and configure
       * computing capacity in AWS datacenters.
       *
       * In this sample, we use the EC2 client to list the availability zones
       * in a region, and then list the instances running in those zones.
       */
      DescribeAvailabilityZonesResult availabilityZonesResult = ec2.describeAvailabilityZones();
      List<AvailabilityZone> availabilityZones = availabilityZonesResult.getAvailabilityZones();
      System.out.println("You have access to " + availabilityZones.size() + " availability zones:");
      for (AvailabilityZone zone : availabilityZones) {
        System.out.println(" - " + zone.getZoneName() + " (" + zone.getRegionName() + ")");
      }

      DescribeInstancesResult describeInstancesResult = ec2.describeInstances();
      Set<Instance> instances = new HashSet<Instance>();
      for (Reservation reservation : describeInstancesResult.getReservations()) {
        instances.addAll(reservation.getInstances());
      }

      System.out.println("You have " + instances.size() + " Amazon EC2 instance(s) running.");

      /*
       * The Amazon S3 client allows you to manage and configure buckets
       * and to upload and download data.
       *
       * In this sample, we use the S3 client to list all the buckets in
       * your account, and then iterate over the object metadata for all
       * objects in one bucket to calculate the total object count and
       * space usage for that one bucket. Note that this sample only
       * retrieves the object's metadata and doesn't actually download the
       * object's content.
       *
       * In addition to the low-level Amazon S3 client in the SDK, there
       * is also a high-level TransferManager API that provides
       * asynchronous management of uploads and downloads with an easy to
       * use API:
       *   http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/transfer/TransferManager.html
       */
      List<Bucket> buckets = s3.listBuckets();
      System.out.println("You have " + buckets.size() + " Amazon S3 bucket(s).");

      if (buckets.size() > 0) {
        Bucket bucket = buckets.get(0);

        long totalSize = 0;
        long totalItems = 0;
        /*
         * The S3Objects and S3Versions classes provide convenient APIs
         * for iterating over the contents of your buckets, without
         * having to manually deal with response pagination.
         */
        for (S3ObjectSummary objectSummary : S3Objects.inBucket(s3, bucket.getName())) {
          totalSize += objectSummary.getSize();
          totalItems++;
        }

        System.out.println(
            "The bucket '"
                + bucket.getName()
                + "' contains "
                + totalItems
                + " objects "
                + "with a total size of "
                + totalSize
                + " bytes.");
      }
    } catch (AmazonServiceException ase) {
      /*
       * AmazonServiceExceptions represent an error response from an AWS
       * service, i.e. your request made it to AWS, but the AWS service
       * either found it invalid or encountered an error trying to execute
       * it.
       */
      System.out.println("Error Message:    " + ase.getMessage());
      System.out.println("HTTP Status Code: " + ase.getStatusCode());
      System.out.println("AWS Error Code:   " + ase.getErrorCode());
      System.out.println("Error Type:       " + ase.getErrorType());
      System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      /*
       * AmazonClientExceptions represent an error that occurred inside
       * the client on the local host, either while trying to send the
       * request to AWS or interpret the response. For example, if no
       * network connection is available, the client won't be able to
       * connect to AWS to execute a request and will throw an
       * AmazonClientException.
       */
      System.out.println("Error Message: " + ace.getMessage());
    }
  }
Example #27
  /**
   * Internal method to execute the HTTP method given.
   *
   * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
   * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler,
   *     ExecutionContext)
   */
  private <T> Response<T> executeHelper(
      Request<?> request,
      HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
      HttpResponseHandler<AmazonServiceException> errorResponseHandler,
      ExecutionContext executionContext)
      throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if it delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName, request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint, request.getEndpoint());
    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());
    final AWSCredentials credentials = executionContext.getCredentials();
    Signer signer = null;

    while (true) {
      ++requestCount;
      awsRequestMetrics.setCounter(Field.RequestCount, requestCount);
      if (requestCount > 1) { // retry
        request.setParameters(originalParameters);
        request.setHeaders(originalHeaders);
      }
      HttpRequestBase httpRequest = null;
      org.apache.http.HttpResponse apacheResponse = null;

      try {
        // Sign the request if a signer was provided
        if (signer == null) signer = executionContext.getSignerByURI(request.getEndpoint());
        if (signer != null && credentials != null) {
          awsRequestMetrics.startEvent(Field.RequestSigningTime);
          try {
            signer.sign(request, credentials);
          } finally {
            awsRequestMetrics.endEvent(Field.RequestSigningTime);
          }
        }

        if (requestLog.isDebugEnabled()) {
          requestLog.debug("Sending Request: " + request.toString());
        }

        httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

        if (httpRequest instanceof HttpEntityEnclosingRequest) {
          entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
        }

        if (redirectedURI != null) {
          httpRequest.setURI(redirectedURI);
        }

        if (requestCount > 1) { // retry
          awsRequestMetrics.startEvent(Field.RetryPauseTime);
          try {
            pauseBeforeNextRetry(
                request.getOriginalRequest(),
                retriedException,
                requestCount,
                config.getRetryPolicy());
          } finally {
            awsRequestMetrics.endEvent(Field.RetryPauseTime);
          }
        }

        if (entity != null) {
          InputStream content = entity.getContent();
          if (requestCount > 1) { // retry
            if (content.markSupported()) {
              content.reset();
              content.mark(-1);
            }
          } else {
            if (content.markSupported()) {
              content.mark(-1);
            }
          }
        }

        captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
        HttpContext httpContext = new BasicHttpContext();
        httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
        retriedException = null;
        awsRequestMetrics.startEvent(Field.HttpRequestTime);
        try {
          apacheResponse = httpClient.execute(httpRequest, httpContext);
        } finally {
          awsRequestMetrics.endEvent(Field.HttpRequestTime);
        }

        if (isRequestSuccessful(apacheResponse)) {
          awsRequestMetrics.addProperty(
              Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
          /*
           * If we get back any 2xx status code, then we know we should
           * treat the service call as successful.
           */
          leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
          HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
          T response =
              handleResponse(
                  request,
                  responseHandler,
                  httpRequest,
                  httpResponse,
                  apacheResponse,
                  executionContext);
          return new Response<T>(response, httpResponse);
        } else if (isTemporaryRedirect(apacheResponse)) {
          /*
           * S3 sends 307 Temporary Redirects if you try to delete an
           * EU bucket from the US endpoint. If we get a 307, we'll
           * point the HTTP method to the redirected location, and let
           * the next retry deliver the request to the right location.
           */
          Header[] locationHeaders = apacheResponse.getHeaders("location");
          String redirectedLocation = locationHeaders[0].getValue();
          log.debug("Redirecting to: " + redirectedLocation);
          redirectedURI = URI.create(redirectedLocation);
          httpRequest.setURI(redirectedURI);
          awsRequestMetrics.addProperty(
              Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
          awsRequestMetrics.addProperty(Field.RedirectLocation, redirectedLocation);
          awsRequestMetrics.addProperty(Field.AWSRequestID, null);

        } else {
          leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
          AmazonServiceException ase =
              handleErrorResponse(request, errorResponseHandler, httpRequest, apacheResponse);
          awsRequestMetrics.addProperty(Field.AWSRequestID, ase.getRequestId());
          awsRequestMetrics.addProperty(Field.AWSErrorCode, ase.getErrorCode());
          awsRequestMetrics.addProperty(Field.StatusCode, ase.getStatusCode());

          if (!shouldRetry(
              request.getOriginalRequest(),
              httpRequest,
              ase,
              requestCount,
              config.getRetryPolicy())) {
            throw ase;
          }

          // Cache the retryable exception
          retriedException = ase;
          /*
           * Checking for clock skew error again because we don't want to set the
           * global time offset for every service exception.
           */
          if (RetryUtils.isClockSkewError(ase)) {
            int timeOffset = parseClockSkewOffset(apacheResponse, ase);
            SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
          }
          resetRequestAfterError(request, ase);
        }
      } catch (IOException ioe) {
        if (log.isInfoEnabled()) {
          log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
        }
        awsRequestMetrics.incrementCounter(Field.Exception);
        awsRequestMetrics.addProperty(Field.Exception, ioe);
        awsRequestMetrics.addProperty(Field.AWSRequestID, null);

        AmazonClientException ace =
            new AmazonClientException("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
        if (!shouldRetry(
            request.getOriginalRequest(),
            httpRequest,
            ace,
            requestCount,
            config.getRetryPolicy())) {
          throw ace;
        }

        // Cache the retryable exception
        retriedException = ace;
        resetRequestAfterError(request, ioe);
      } catch (RuntimeException e) {
        throw handleUnexpectedFailure(e, awsRequestMetrics);
      } catch (Error e) {
        throw handleUnexpectedFailure(e, awsRequestMetrics);
      } finally {
        /*
         * Some response handlers need to manually manage the HTTP
         * connection and will take care of releasing the connection on
         * their own, but if this response handler doesn't need the
         * connection left open, we go ahead and release it to free
         * up resources.
         */
        if (!leaveHttpConnectionOpen) {
          try {
            if (apacheResponse != null
                && apacheResponse.getEntity() != null
                && apacheResponse.getEntity().getContent() != null) {
              apacheResponse.getEntity().getContent().close();
            }
          } catch (IOException e) {
            log.warn("Cannot close the response content.", e);
          }
        }
      }
    } /* end while (true) */
  }
Example #28
  private void jButton1ActionPerformed(
      java.awt.event.ActionEvent evt) { // GEN-FIRST:event_jButton1ActionPerformed
    // TODO add your handling code here: upload file
    JFileChooser jfc = new JFileChooser();
    jfc.setAcceptAllFileFilterUsed(false);
    jfc.setFileFilter(new FileNameExtensionFilter("Text Files", "txt"));
    int returnVal = jfc.showOpenDialog(this);
    if (returnVal == JFileChooser.APPROVE_OPTION) {
      File file = jfc.getSelectedFile();
      try {
        filepath = file.getCanonicalPath();
        FileReader fr = new FileReader(filepath);

        BufferedReader br = new BufferedReader(fr);
        String line;
        int total_no_of_words = 0;

        while ((line = br.readLine()) != null) {

          String[] words_arr = line.split(" ");
          total_no_of_words += words_arr.length;
        }
        jLabel4.setText("word count :" + total_no_of_words);
        System.out.println("Total no of words = " + total_no_of_words);
        if (total_no_of_words != 0) {
          File userFile = new File(filepath);
          keyName = userFile.getName();
          String myAccessKeyID = "AKIAI7PATIX6LHJA3A3Q";
          String mySecretKey = "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";
          AWSCredentials myCredentials = new BasicAWSCredentials(myAccessKeyID, mySecretKey);
          ClientConfiguration clientConfig = new ClientConfiguration();
          clientConfig.setProtocol(Protocol.HTTP);
          AmazonS3 conn = new AmazonS3Client(myCredentials, clientConfig);
          try {

            File file1 = new File(filepath);
            file_size = file1.length();
            if (file_size < 100000) {
              conn.putObject(new PutObjectRequest(bucketName, keyName, file1));
              jLabel2.setText("Uploading complete !");
            } else {
              jLabel2.setText("Uploading failed size greater than 100kb...");
            }
          } catch (AmazonServiceException ase) {
            System.out.println(
                "Caught an AmazonServiceException, which "
                    + "means your request made it "
                    + "to Amazon S3, but was rejected with an error response"
                    + " for some reason.");
            System.out.println("Error Message:    " + ase.getMessage());
            System.out.println("HTTP Status Code: " + ase.getStatusCode());
            System.out.println("AWS Error Code:   " + ase.getErrorCode());
            System.out.println("Error Type:       " + ase.getErrorType());
            System.out.println("Request ID:       " + ase.getRequestId());
          } catch (AmazonClientException ace) {
            System.out.println(
                "Caught an AmazonClientException, which "
                    + "means the client encountered "
                    + "an internal error while trying to "
                    + "communicate with S3, "
                    + "such as not being able to access the network.");
            System.out.println("Error Message: " + ace.getMessage());
          } /*catch (InterruptedException ex) {
                Logger.getLogger(Upload.class.getName()).log(Level.SEVERE, null, ex);
            }*/
        } else {
          jLabel2.setText("Uploading failed file empty !");
        }

      } catch (IOException ex) {
        Logger.getLogger(Upload.class.getName()).log(Level.SEVERE, null, ex);
      }
    }
    if (returnVal == JFileChooser.CANCEL_OPTION) {
      System.exit(0);
    }
  } // GEN-LAST:event_jButton1ActionPerformed
Example #29
  public static String gen_part_url(
      String uploadId, int file_size, String file_name, String file_type) {
    AWSCredentials xs3_credentials = new BasicAWSCredentials(xs3_access_key, xs3_secret_key);
    ClientConfiguration xs3_clientconfig = new ClientConfiguration();
    xs3_clientconfig.setProtocol(Protocol.HTTP);

    S3ClientOptions xs3_client_options = new S3ClientOptions();
    xs3_client_options.setPathStyleAccess(true);

    xs3_client = new AmazonS3Client(xs3_credentials, xs3_clientconfig);
    xs3_client.setEndpoint(xs3_endpoint);
    xs3_client.setS3ClientOptions(xs3_client_options);

    try {

      final int xs3_part_size = 1024 * 1024 * 5;

      int xs3_part_count = (int) Math.ceil((double) (file_size) / (double) xs3_part_size);
      JSONArray jsonArray = new JSONArray();
      JSONObject jsonObject_1 = new JSONObject();
      jsonObject_1.put("total_num", xs3_part_count);
      jsonObject_1.put("upload_id", uploadId);

      JSONArray jsonArray_sub = new JSONArray();
      for (int part_no = 0; part_no < xs3_part_count; part_no++) {

        long xs3_offset_bytes = xs3_part_size * part_no;

        long part_size =
            xs3_part_size < (file_size - xs3_offset_bytes)
                ? xs3_part_size
                : (file_size - xs3_offset_bytes);

        java.util.Date expiration = new java.util.Date();
        long milliSeconds = expiration.getTime();
        milliSeconds += 1000 * 60 * 5;
        expiration.setTime(milliSeconds);

        GeneratePresignedUrlRequest xs3_genurl_req =
            new GeneratePresignedUrlRequest(xs3_bucketname, file_name);
        xs3_genurl_req.setMethod(HttpMethod.PUT);
        xs3_genurl_req.setExpiration(expiration);
        xs3_genurl_req.setContentType(file_type);
        xs3_genurl_req.addRequestParameter("uploadId", uploadId);
        xs3_genurl_req.addRequestParameter("partNumber", String.valueOf(part_no + 1));

        URL url = xs3_client.generatePresignedUrl(xs3_genurl_req);
        System.out.println(url.toString());

        JSONObject jsonObject = new JSONObject();
        jsonObject.put("part_idx", part_no);
        jsonObject.put("part_url", url.toString());
        jsonObject.put("upload_len", part_size);
        jsonObject.put("part_begin", xs3_offset_bytes);
        jsonObject.put("part_end", xs3_offset_bytes + part_size);

        jsonArray_sub.add(jsonObject);
      }

      jsonObject_1.put("multi_list", jsonArray_sub);
      jsonArray.add(jsonObject_1);

      return jsonArray.toString();

    } catch (AmazonServiceException ase) {
      System.out.println("xs3_svr_error_message:" + ase.getMessage());
      System.out.println("xs3_svr_status_code:  " + ase.getStatusCode());
      System.out.println("xs3_svr_error_code:   " + ase.getErrorCode());
      System.out.println("xs3_svr_error_type:   " + ase.getErrorType());
      System.out.println("xs3_svr_request_id:   " + ase.getRequestId());
    } catch (AmazonClientException ace) {
      System.out.println("xs3_clt_error_message:" + ace.getMessage());
    }

    return null;
  }
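  // Not part of the original example: after a client has PUT every part to its presigned
  // URL, the multipart upload still has to be completed on the server side using the ETag
  // returned for each part. A minimal, hypothetical sketch (complete_upload and its
  // parameters are assumed names; it reuses the xs3_client and xs3_bucketname fields
  // configured in gen_part_url and assumes imports of java.util.* and
  // com.amazonaws.services.s3.model.PartETag / CompleteMultipartUploadRequest):
  public static void complete_upload(
      String uploadId, String file_name, Map<Integer, String> part_etags) {
    List<PartETag> etags = new ArrayList<PartETag>();
    for (Map.Entry<Integer, String> entry : part_etags.entrySet()) {
      // PartETag pairs the 1-based part number with the ETag returned by each part's PUT.
      etags.add(new PartETag(entry.getKey(), entry.getValue()));
    }
    xs3_client.completeMultipartUpload(
        new CompleteMultipartUploadRequest(xs3_bucketname, file_name, uploadId, etags));
  }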
  /** @param args */
  public static void main(String[] args) {
    // ============================================================================================//
    // =============================== Submitting a Request =======================================//
    // ============================================================================================//

    // Create the AmazonEC2Client object so we can call various APIs.
    AmazonEC2 ec2 = new AmazonEC2Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    ec2.setRegion(usWest2);

    // Initializes a Spot Instance Request
    RequestSpotInstancesRequest requestRequest = new RequestSpotInstancesRequest();

    // *************************** Required Parameters Settings ************************//
    // Request 1 x t1.micro instance with a bid price of $0.03.
    requestRequest.setSpotPrice("0.03");
    requestRequest.setInstanceCount(Integer.valueOf(1));

    // Setup the specifications of the launch. This includes the instance type (e.g. t1.micro)
    // and the latest Amazon Linux AMI id available. Note, you should always use the latest
    // Amazon Linux AMI id or another of your choosing.
    LaunchSpecification launchSpecification = new LaunchSpecification();
    launchSpecification.setImageId("ami-8c1fece5");
    launchSpecification.setInstanceType("t1.micro");

    // Add the security group to the request.
    ArrayList<String> securityGroups = new ArrayList<String>();
    securityGroups.add("GettingStartedGroup");
    launchSpecification.setSecurityGroups(securityGroups);

    // *************************** Bid Type Settings ************************//
    // Set the type of the bid to persistent.
    requestRequest.setType("persistent");

    // *************************** Valid From/To Settings ************************//
    // Set the valid start time to be two minutes from now.
    Calendar from = Calendar.getInstance();
    from.add(Calendar.MINUTE, 2);
    requestRequest.setValidFrom(from.getTime());

    // Set the valid end time to be two minutes and two hours from now.
    Calendar until = (Calendar) from.clone();
    until.add(Calendar.HOUR, 2);
    requestRequest.setValidUntil(until.getTime());

    // *************************** Launch Group Settings ************************//
    // Set the launch group.
    requestRequest.setLaunchGroup("ADVANCED-DEMO-LAUNCH-GROUP");

    // *************************** Availability Zone Group Settings ************************//
    // Set the availability zone group.
    requestRequest.setAvailabilityZoneGroup("ADVANCED-DEMO-AZ-GROUP");

    // *************************** Add the block device mapping ************************//

    // Goal: Setup block device mappings to ensure that we will not delete
    // the root partition on termination.

    // Create the block device mapping to describe the root partition.
    BlockDeviceMapping blockDeviceMapping = new BlockDeviceMapping();
    blockDeviceMapping.setDeviceName("/dev/sda1");

    // Set the delete on termination flag to false.
    EbsBlockDevice ebs = new EbsBlockDevice();
    ebs.setDeleteOnTermination(Boolean.FALSE);
    blockDeviceMapping.setEbs(ebs);

    // Add the block device mapping to the block list.
    ArrayList<BlockDeviceMapping> blockList = new ArrayList<BlockDeviceMapping>();
    blockList.add(blockDeviceMapping);

    // Set the block device mapping configuration in the launch specifications.
    launchSpecification.setBlockDeviceMappings(blockList);

    // *************************** Add the availability zone ************************//
    // Setup the availability zone to use. Note we could retrieve the availability
    // zones using the ec2.describeAvailabilityZones() API (a commented sketch of that call
    // follows below). For this demo we will just use us-east-1b.
    SpotPlacement placement = new SpotPlacement("us-east-1b");
    launchSpecification.setPlacement(placement);
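    // Not in the original sample: the zone could also be chosen dynamically instead of
    // hard-coding us-east-1b. A minimal sketch of that call, kept commented out so the
    // demo's behavior is unchanged (the "available" state check is an assumption):
    /*
    DescribeAvailabilityZonesResult zonesResult = ec2.describeAvailabilityZones();
    for (AvailabilityZone zone : zonesResult.getAvailabilityZones()) {
      if ("available".equals(zone.getState())) {
        launchSpecification.setPlacement(new SpotPlacement(zone.getZoneName()));
        break;
      }
    }
    */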

    // *************************** Add the placement group ************************//
    // Setup the placement group to use with whatever name you desire.
    // For this demo we will just use "ADVANCED-DEMO-PLACEMENT-GROUP".
    // Note: We have commented this out, because we are not leveraging cc1.4xlarge or
    // cg1.4xlarge in this example.
    /*
    SpotPlacement pg = new SpotPlacement();
    pg.setGroupName("ADVANCED-DEMO-PLACEMENT-GROUP");
    launchSpecification.setPlacement(pg);
    */

    // *************************** Add the launch specification ************************//
    // Add the launch specification.
    requestRequest.setLaunchSpecification(launchSpecification);

    // ============================================================================================//
    // =========================== Getting the Request ID from the Request ========================//
    // ============================================================================================//

    // Call the RequestSpotInstance API.
    RequestSpotInstancesResult requestResult = ec2.requestSpotInstances(requestRequest);
    List<SpotInstanceRequest> requestResponses = requestResult.getSpotInstanceRequests();

    // Setup an arraylist to collect all of the request ids we want to watch hit the running
    // state.
    ArrayList<String> spotInstanceRequestIds = new ArrayList<String>();

    // Add all of the request ids to the list, so we can determine when they hit the
    // active state.
    for (SpotInstanceRequest requestResponse : requestResponses) {
      System.out.println("Created Spot Request: " + requestResponse.getSpotInstanceRequestId());
      spotInstanceRequestIds.add(requestResponse.getSpotInstanceRequestId());
    }

    // ============================================================================================//
    // =========================== Determining the State of the Spot Request ======================//
    // ============================================================================================//

    // Create a variable that will track whether there are any requests still in the open state.
    boolean anyOpen;

    // Initialize variables.
    ArrayList<String> instanceIds = new ArrayList<String>();

    do {
      // Reset the list on each polling pass so fulfilled instance ids are not added twice.
      instanceIds.clear();
      // Create the describeRequest with all of the request ids to monitor (i.e. the ones we started).
      DescribeSpotInstanceRequestsRequest describeRequest =
          new DescribeSpotInstanceRequestsRequest();
      describeRequest.setSpotInstanceRequestIds(spotInstanceRequestIds);

      // Initialize the anyOpen variable to false, which assumes there are no requests open
      // unless we find one that is still open.
      anyOpen = false;

      try {
        // Retrieve all of the requests we want to monitor.
        DescribeSpotInstanceRequestsResult describeResult =
            ec2.describeSpotInstanceRequests(describeRequest);
        List<SpotInstanceRequest> describeResponses = describeResult.getSpotInstanceRequests();

        // Look through each request and determine if they are all in the active state.
        for (SpotInstanceRequest describeResponse : describeResponses) {
          // If the state is open, it hasn't changed since we attempted to request it.
          // There is the potential for it to transition almost immediately to closed or
          // cancelled so we compare against open instead of active.
          if (describeResponse.getState().equals("open")) {
            anyOpen = true;
            break;
          }

          // Add the instance id to the list we will eventually terminate.
          instanceIds.add(describeResponse.getInstanceId());
        }
      } catch (AmazonServiceException e) {
        // If we have an exception, ensure we don't break out of the loop.
        // This prevents the scenario where there was blip on the wire.
        anyOpen = true;
      }

      try {
        // Sleep for 60 seconds.
        Thread.sleep(60 * 1000);
      } catch (Exception e) {
        // Do nothing because it woke up early.
      }
    } while (anyOpen);
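    // Not part of the original walkthrough: before cancelling and terminating, the fulfilled
    // instances would normally be used for something. A hedged sketch of how their public DNS
    // names could be looked up from the collected instance ids, kept commented out (assumes
    // DescribeInstancesRequest, Reservation and Instance are imported from the EC2 model):
    /*
    DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest();
    describeInstancesRequest.setInstanceIds(instanceIds);
    for (Reservation reservation : ec2.describeInstances(describeInstancesRequest).getReservations()) {
      for (Instance instance : reservation.getInstances()) {
        System.out.println(instance.getInstanceId() + " -> " + instance.getPublicDnsName());
      }
    }
    */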

    // ============================================================================================//
    // ====================================== Canceling the Request ==============================//
    // ============================================================================================//

    try {
      // Cancel requests.
      CancelSpotInstanceRequestsRequest cancelRequest =
          new CancelSpotInstanceRequestsRequest(spotInstanceRequestIds);
      ec2.cancelSpotInstanceRequests(cancelRequest);
    } catch (AmazonServiceException e) {
      // Write out any exceptions that may have occurred.
      System.out.println("Error cancelling instances");
      System.out.println("Caught Exception: " + e.getMessage());
      System.out.println("Reponse Status Code: " + e.getStatusCode());
      System.out.println("Error Code: " + e.getErrorCode());
      System.out.println("Request ID: " + e.getRequestId());
    }

    // ============================================================================================//
    // =================================== Terminating any Instances ==============================//
    // ============================================================================================//
    try {
      // Terminate instances.
      TerminateInstancesRequest terminateRequest = new TerminateInstancesRequest(instanceIds);
      ec2.terminateInstances(terminateRequest);
    } catch (AmazonServiceException e) {
      // Write out any exceptions that may have occurred.
      System.out.println("Error terminating instances");
      System.out.println("Caught Exception: " + e.getMessage());
      System.out.println("Reponse Status Code: " + e.getStatusCode());
      System.out.println("Error Code: " + e.getErrorCode());
      System.out.println("Request ID: " + e.getRequestId());
    }
  }