S3ObjectSummary findAndQueueObjects(AmazonS3Source.S3Offset s3offset, boolean checkCurrent)
    throws AmazonClientException {
  List<S3ObjectSummary> s3ObjectSummaries;
  ObjectOrdering objectOrdering = s3ConfigBean.s3FileConfig.objectOrdering;
  switch (objectOrdering) {
    case TIMESTAMP:
      s3ObjectSummaries = AmazonS3Util.listObjectsChronologically(
          s3Client,
          s3ConfigBean,
          pathMatcher,
          s3offset,
          objectQueue.remainingCapacity()
      );
      break;
    case LEXICOGRAPHICAL:
      s3ObjectSummaries = AmazonS3Util.listObjectsLexicographically(
          s3Client,
          s3ConfigBean,
          pathMatcher,
          s3offset,
          objectQueue.remainingCapacity()
      );
      break;
    default:
      throw new IllegalArgumentException("Unknown ordering: " + objectOrdering.getLabel());
  }
  for (S3ObjectSummary objectSummary : s3ObjectSummaries) {
    addObjectToQueue(objectSummary, checkCurrent);
  }
  spoolQueueMeter.mark(objectQueue.size());
  LOG.debug("Found '{}' files", objectQueue.size());
  return s3ObjectSummaries.isEmpty() ? null : s3ObjectSummaries.get(s3ObjectSummaries.size() - 1);
}
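// A minimal sketch, not part of the original source, of how a caller might use the summary
// returned above to decide when to re-list. The method name 'refreshQueueIfDrainedSketch'
// and its logging are assumptions for illustration only; the real AmazonS3Source drives
// listing differently.
void refreshQueueIfDrainedSketch(AmazonS3Source.S3Offset currentOffset) {
  // Only hit the S3 LIST API again once the in-memory queue has drained.
  if (objectQueue.isEmpty()) {
    S3ObjectSummary last = findAndQueueObjects(currentOffset, false);
    if (last != null) {
      // The returned summary is the last key of this listing, so a subsequent listing
      // can resume after it.
      LOG.debug("Listing advanced to '{}' (lastModified {})", last.getKey(), last.getLastModified());
    }
  }
}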
public void postProcessOlderObjectIfNeeded(AmazonS3Source.S3Offset s3Offset) {
  // If SDC was shut down after reading an object but before post processing it, handle it now.
  // The scenario is detected as follows:
  //   1. The current key must not be null.
  //   2. The offset must be -1.
  //   3. An object with the same key must exist in S3.
  //   4. The timestamp of the object in S3 must match the timestamp in the offset. (It is
  //      possible that someone uploads another object with the same name; comparing the
  //      timestamps lets us avoid post processing that new object without producing records.)
  if (s3Offset.getKey() != null && "-1".equals(s3Offset.getOffset())) {
    // Conditions 1 and 2 are met. Check for 3 and 4.
    S3ObjectSummary objectSummary =
        AmazonS3Util.getObjectSummary(s3Client, s3ConfigBean.s3Config.bucket, s3Offset.getKey());
    if (objectSummary != null &&
        objectSummary.getLastModified().compareTo(new Date(Long.parseLong(s3Offset.getTimestamp()))) == 0) {
      postProcessOrErrorHandle(
          s3Offset.getKey(),
          s3ConfigBean.postProcessingConfig.postProcessing,
          s3ConfigBean.postProcessingConfig.postProcessBucket,
          s3ConfigBean.postProcessingConfig.postProcessPrefix,
          s3ConfigBean.postProcessingConfig.archivingOption
      );
    }
  }
  currentObject = null;
}
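// A minimal sketch, not in the original source, that names the recovery condition checked
// above: an offset of "-1" with a non-null key means the object was fully read but may not
// have been post processed before shutdown. The helper name is hypothetical.
private boolean wasFullyReadButPossiblyNotPostProcessed(AmazonS3Source.S3Offset s3Offset) {
  return s3Offset.getKey() != null && "-1".equals(s3Offset.getOffset());
}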
private void archive(
    String postProcessObjectKey,
    String postProcessBucket,
    String postProcessFolder,
    S3ArchivingOption archivingOption
) {
  boolean isMove = true;
  String destBucket = s3ConfigBean.s3Config.bucket;
  switch (archivingOption) {
    case MOVE_TO_PREFIX:
      break;
    case MOVE_TO_BUCKET:
      destBucket = postProcessBucket;
      break;
    case COPY_TO_PREFIX:
      isMove = false;
      break;
    case COPY_TO_BUCKET:
      isMove = false;
      destBucket = postProcessBucket;
      break;
    default:
      throw new IllegalStateException("Invalid archiving option: " + archivingOption.name());
  }
  String srcObjKey = postProcessObjectKey.substring(
      postProcessObjectKey.lastIndexOf(s3ConfigBean.s3Config.delimiter) + 1);
  String destKey = postProcessFolder + srcObjKey;
  AmazonS3Util.copy(
      s3Client,
      s3ConfigBean.s3Config.bucket,
      postProcessObjectKey,
      destBucket,
      destKey,
      isMove
  );
}
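// A minimal sketch, not in the original source, of the destination-key derivation used in
// archive(): with delimiter "/", an object key "in/2018/data.log" and post-process folder
// "archive/" yield "archive/data.log" (only the last path segment is kept). The helper name
// and sample values are assumptions for illustration only.
static String deriveDestKeySketch(String objectKey, String delimiter, String postProcessFolder) {
  String srcObjKey = objectKey.substring(objectKey.lastIndexOf(delimiter) + 1);
  return postProcessFolder + srcObjKey;
}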