private ObjectMetadata getS3ObjectMetadata(final Path path)
        throws IOException
{
    try {
        return retry()
                .maxAttempts(maxClientRetries)
                .exponentialBackoff(new Duration(1, TimeUnit.SECONDS), maxBackoffTime, maxRetryTime, 2.0)
                .stopOn(InterruptedException.class, UnrecoverableS3OperationException.class)
                .run("getS3ObjectMetadata", () -> {
                    try {
                        return s3.getObjectMetadata(uri.getHost(), keyFromPath(path));
                    }
                    catch (AmazonS3Exception e) {
                        if (e.getStatusCode() == SC_NOT_FOUND) {
                            // the object does not exist; report it as missing rather than failing
                            return null;
                        }
                        if (e.getStatusCode() == SC_FORBIDDEN) {
                            // access denied will not succeed on retry, so stop immediately
                            throw new UnrecoverableS3OperationException(e);
                        }
                        throw Throwables.propagate(e);
                    }
                });
    }
    catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
    }
    catch (Exception e) {
        Throwables.propagateIfInstanceOf(e, IOException.class);
        throw Throwables.propagate(e);
    }
}
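// The retry above stops on UnrecoverableS3OperationException, which is referenced
// but not defined in this snippet. A minimal sketch, assuming it is a simple
// wrapper marking failures (such as a 403) that retrying cannot fix; the real
// class may carry additional context.
private static class UnrecoverableS3OperationException
        extends RuntimeException
{
    public UnrecoverableS3OperationException(Throwable cause)
    {
        super(cause);
    }
}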
private StoredObject createCombinedObjectLarge(CombinedStoredObject combinedObject)
{
    URI location = combinedObject.getLocation();
    log.info("starting multipart upload: %s", location);

    String bucket = getS3Bucket(location);
    String key = getS3ObjectKey(location);
    String uploadId = s3Service.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key))
            .getUploadId();

    try {
        // copy each source object into the upload as a part, entirely server-side
        List<PartETag> parts = newArrayList();
        int partNumber = 1;
        for (StoredObject newCombinedObjectPart : combinedObject.getSourceParts()) {
            CopyPartResult part = s3Service.copyPart(new CopyPartRequest()
                    .withUploadId(uploadId)
                    .withPartNumber(partNumber)
                    .withDestinationBucketName(bucket)
                    .withDestinationKey(key)
                    .withSourceBucketName(getS3Bucket(newCombinedObjectPart.getLocation()))
                    .withSourceKey(getS3ObjectKey(newCombinedObjectPart.getLocation())));
            parts.add(new PartETag(partNumber, part.getETag()));
            partNumber++;
        }

        String etag = s3Service.completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, key, uploadId, parts))
                .getETag();

        ObjectMetadata newObject = s3Service.getObjectMetadata(bucket, key);
        log.info("completed multipart upload: %s", location);

        if (!etag.equals(newObject.getETag())) {
            // this might happen in rare cases due to S3's eventual consistency
            throw new IllegalStateException("completed etag is different from combined object etag");
        }

        return updateStoredObject(location, newObject);
    }
    catch (AmazonClientException e) {
        try {
            // abort the upload so already-copied parts do not linger and accrue storage charges
            s3Service.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
        }
        catch (AmazonClientException ignored) {
        }
        throw Throwables.propagate(e);
    }
}
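// S3 rejects a multipart upload whose non-final parts are smaller than 5 MB,
// and the same limit applies to parts produced by copyPart. A hedged pre-check
// one could run before createCombinedObjectLarge; the getSize() accessor on
// StoredObject is assumed here and may not match the real class.
private static void validateSourcePartSizes(CombinedStoredObject combinedObject)
{
    List<StoredObject> sourceParts = combinedObject.getSourceParts();
    long minimumPartSize = 5L * 1024 * 1024; // 5 MB minimum for all but the last part
    for (int i = 0; i < sourceParts.size() - 1; i++) {
        if (sourceParts.get(i).getSize() < minimumPartSize) {
            throw new IllegalArgumentException(
                    "source part " + (i + 1) + " is smaller than the 5 MB S3 minimum");
        }
    }
}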
@Override
public StoredObject putObject(URI location, File source)
{
    try {
        log.info("starting upload: %s", location);
        PutObjectResult result = s3Service.putObject(getS3Bucket(location), getS3ObjectKey(location), source);

        // re-read the metadata and verify the ETag round-trips, confirming the write is visible
        ObjectMetadata metadata = s3Service.getObjectMetadata(getS3Bucket(location), getS3ObjectKey(location));
        if (!result.getETag().equals(metadata.getETag())) {
            // this might happen in rare cases due to S3's eventual consistency
            throw new IllegalStateException("uploaded etag is different from retrieved object etag");
        }

        log.info("completed upload: %s", location);
        return updateStoredObject(location, metadata);
    }
    catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
public StoredObject getObjectDetails(URI target)
{
    StoredObject storedObject = new StoredObject(target);
    ObjectMetadata metadata = s3Service.getObjectMetadata(getS3Bucket(target), getS3ObjectKey(target));
    return updateStoredObject(storedObject.getLocation(), metadata);
}
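// Each method above funnels its result through updateStoredObject. A minimal
// sketch of what it likely does, assuming a hypothetical StoredObject
// constructor taking (location, etag, size, lastModified); the real signature
// is not shown in these snippets.
private static StoredObject updateStoredObject(URI location, ObjectMetadata metadata)
{
    return new StoredObject(
            location,
            metadata.getETag(),
            metadata.getContentLength(),
            metadata.getLastModified().getTime());
}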
@Test
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
public void testEncryption() {
    Client client = client();
    logger.info("--> creating s3 repository with bucket[{}] and path [{}]",
            internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);

    PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.settingsBuilder()
                    .put("base_path", basePath)
                    .put("chunk_size", randomIntBetween(1000, 10000))
                    .put("server_side_encryption", true)
            ).get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

    createIndex("test-idx-1", "test-idx-2", "test-idx-3");
    ensureGreen();

    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
        index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
        index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
    }
    refresh();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));

    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
            equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()
            .getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

    Settings settings = internalCluster().getInstance(Settings.class);
    Settings bucket = settings.getByPrefix("repositories.s3.");
    AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(
            null,
            null,
            bucket.get("region", settings.get("repositories.s3.region")),
            bucket.get("access_key", settings.get("cloud.aws.access_key")),
            bucket.get("secret_key", settings.get("cloud.aws.secret_key")));

    String bucketName = bucket.get("bucket");
    logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
    List<S3ObjectSummary> summaries = s3Client.listObjects(bucketName, basePath).getObjectSummaries();
    for (S3ObjectSummary summary : summaries) {
        assertThat(s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256"));
    }

    logger.info("--> delete some data");
    for (int i = 0; i < 50; i++) {
        client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
    }
    for (int i = 50; i < 100; i++) {
        client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
    }
    for (int i = 0; i < 100; i += 2) {
        client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
    }
    refresh();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
    assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

    logger.info("--> close indices");
    client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();

    logger.info("--> restore all indices from the snapshot");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

    ensureGreen();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
    assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));

    // Test restore after index deletion
    logger.info("--> delete indices");
    cluster().wipeIndices("test-idx-1", "test-idx-2");
    logger.info("--> restore one index after deletion");
    restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    ensureGreen();
    assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
    ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
    assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
    assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
}