@Test
 public void testByteArrayAllocation() {
   // Verifies off-heap accounting for byte[] values: an array large enough to
   // go off heap consumes (length + per-object overhead) until released, while
   // a smaller array is stored inline and never touches off-heap memory.
   GemFireCacheImpl gfc = createCache();
   try {
     MemoryAllocator ma = gfc.getOffHeapStore();
     assertNotNull(ma);
     final long offHeapSize = ma.getFreeMemory();
     assertEquals(0, ma.getUsedMemory());
     // 8 bytes: large enough to be allocated off heap as a MemoryChunk.
     byte[] data = new byte[] {1, 2, 3, 4, 5, 6, 7, 8};
     MemoryChunk mc1 = (MemoryChunk) ma.allocateAndInitialize(data, false, false, null);
     assertEquals(data.length + perObjectOverhead(), ma.getUsedMemory());
     assertEquals(offHeapSize - (data.length + perObjectOverhead()), ma.getFreeMemory());
     // Round-trip the bytes back out of the chunk and compare.
     byte[] data2 = new byte[data.length];
     mc1.readBytes(0, data2);
     assertTrue(Arrays.equals(data, data2));
     mc1.release();
     // After release all off-heap memory is free again.
     assertEquals(offHeapSize, ma.getFreeMemory());
     assertEquals(0, ma.getUsedMemory());
     // try some small byte[] that don't need to be stored off heap.
     data = new byte[] {1, 2, 3, 4, 5, 6, 7};
     StoredObject so1 = ma.allocateAndInitialize(data, false, false, null);
     assertEquals(0, ma.getUsedMemory());
     assertEquals(offHeapSize, ma.getFreeMemory());
     // Dead `new byte[data.length]` allocation removed: getDeserializedForReading()
     // returns the array directly, so the pre-allocation was never read.
     data2 = (byte[]) so1.getDeserializedForReading();
     assertTrue(Arrays.equals(data, data2));
   } finally {
     closeCache(gfc, false);
   }
 }
// Example #2 (rating: 0)
  /**
   * Writes the stored object's content as a 200 OK response and closes the exchange.
   * The ETag header is the quoted MD5 hex string of the content; the body is sent
   * as text/plain.
   */
  private void respondGetObjectAndClose(HttpExchange exchange, StoredObject storedObject)
      throws IOException {
    byte[] response = storedObject.getContent();

    // Use the shared header-name constant instead of the "ETag" string literal,
    // for consistency with handlePutObject.
    addHeader(exchange, HttpHeaders.E_TAG, "\"" + storedObject.md5HexString() + "\"");
    addHeader(exchange, HttpHeaders.CONTENT_TYPE, "text/plain");
    respondAndClose(exchange, HttpURLConnection.HTTP_OK, response);
  }
  /**
   * Combines the source parts of {@code combinedObject} into a single S3 object
   * using the multipart-upload copy API: each source part becomes one copied part,
   * the upload is completed, and the resulting object's metadata is verified and
   * recorded. On any AmazonClientException the multipart upload is aborted
   * (best-effort) and the original exception is rethrown.
   */
  private StoredObject createCombinedObjectLarge(CombinedStoredObject combinedObject) {
    URI location = combinedObject.getLocation();
    log.info("starting multipart upload: %s", location);

    String bucket = getS3Bucket(location);
    String key = getS3ObjectKey(location);

    // The upload id ties all subsequent part copies, the completion call, and
    // any abort to this one multipart upload.
    String uploadId =
        s3Service
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key))
            .getUploadId();

    try {
      List<PartETag> parts = newArrayList();
      // S3 part numbers are 1-based and must be sequential here to preserve order.
      int partNumber = 1;
      for (StoredObject newCombinedObjectPart : combinedObject.getSourceParts()) {
        // Server-side copy of each source object as one part of the upload.
        CopyPartResult part =
            s3Service.copyPart(
                new CopyPartRequest()
                    .withUploadId(uploadId)
                    .withPartNumber(partNumber)
                    .withDestinationBucketName(bucket)
                    .withDestinationKey(key)
                    .withSourceBucketName(getS3Bucket(newCombinedObjectPart.getLocation()))
                    .withSourceKey(getS3ObjectKey(newCombinedObjectPart.getLocation())));
        parts.add(new PartETag(partNumber, part.getETag()));
        partNumber++;
      }

      String etag =
          s3Service
              .completeMultipartUpload(
                  new CompleteMultipartUploadRequest(bucket, key, uploadId, parts))
              .getETag();

      // Re-read the metadata of the newly created object to record its final state.
      ObjectMetadata newObject = s3Service.getObjectMetadata(bucket, key);
      log.info("completed multipart upload: %s", location);

      if (!etag.equals(newObject.getETag())) {
        // this might happen in rare cases due to S3's eventual consistency
        throw new IllegalStateException("completed etag is different from combined object etag");
      }

      return updateStoredObject(location, newObject);
    } catch (AmazonClientException e) {
      try {
        // Best-effort cleanup so the incomplete upload does not keep accruing
        // storage; a failure here must not mask the original exception.
        s3Service.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
      } catch (AmazonClientException ignored) {
      }
      throw Throwables.propagate(e);
    }
  }
// Example #4 (rating: 0)
  /**
   * Looks up the file for {@code uri} under the store root and describes it as a
   * StoredObject (folder flag, timestamps, resource length). Returns null when no
   * such file exists. Note: creation date is reported as the last-modified time,
   * since java.io.File exposes no creation timestamp.
   */
  public StoredObject getStoredObject(ITransaction transaction, String uri) {
    File target = new File(_root, uri);
    if (!target.exists()) {
      return null;
    }

    long modifiedMillis = target.lastModified();
    StoredObject result = new StoredObject();
    result.setFolder(target.isDirectory());
    result.setLastModified(new Date(modifiedMillis));
    result.setCreationDate(new Date(modifiedMillis));
    result.setResourceLength(getResourceLength(transaction, uri));
    return result;
  }
// Example #5 (rating: 0)
  /**
   * Handles an S3 PUT-object request: stores the request body under the given key
   * in the named bucket and responds 200 OK with the content's MD5 as the ETag.
   * Responds with INVALID_URI for keys containing "..", and NO_SUCH_BUCKET when
   * the bucket does not exist.
   */
  private void handlePutObject(HttpExchange exchange, String bucketName, String keyName)
      throws IOException {
    // Reject keys that look like directory traversal attempts.
    if (DOUBLE_DOT_PATTERN.matcher(keyName).matches()) {
      respondErrorAndClose(exchange, ErrorResponse.INVALID_URI);
      return;
    }

    byte[] content = readRequestBodyFully(exchange);

    // Single lookup instead of containsKey + get: avoids a redundant map probe.
    Bucket bucket = buckets.get(bucketName);
    if (bucket == null) {
      respondErrorAndClose(exchange, ErrorResponse.NO_SUCH_BUCKET);
      return;
    }

    StoredObject storedObject = new StoredObject(keyName, content);
    bucket.put(keyName, storedObject);
    addHeader(exchange, HttpHeaders.E_TAG, "\"" + storedObject.md5HexString() + "\"");
    respondAndClose(exchange, HttpURLConnection.HTTP_OK);
  }
  /**
   * Combines the source parts of {@code combinedObject} into a single stored
   * object, dispatching to the small-file or large-file (multipart) code path
   * based on the size of the parts. All parts must be on the same side of the
   * size threshold; mixing large and small parts is rejected.
   */
  @Override
  public StoredObject createCombinedObject(CombinedStoredObject combinedObject) {
    Preconditions.checkNotNull(combinedObject, "combinedObject is null");
    Preconditions.checkArgument(
        !combinedObject.getSourceParts().isEmpty(), "combinedObject sourceParts is empty");

    // 5 MB threshold, named once instead of duplicating the magic number below.
    // S3 multipart copy requires parts (other than the last) to be at least 5 MB,
    // which is why small parts must take a different combining path.
    final long smallFileThreshold = 5 * 1024 * 1024;

    boolean setIsSmall = combinedObject.getSourceParts().get(0).getSize() < smallFileThreshold;

    // verify size: every part must match the classification of the first part
    for (StoredObject newCombinedObjectPart : combinedObject.getSourceParts()) {
      boolean fileIsSmall = newCombinedObjectPart.getSize() < smallFileThreshold;
      Preconditions.checkArgument(
          fileIsSmall == setIsSmall,
          "combinedObject sourceParts contains mixed large and small files");
    }

    return setIsSmall
        ? createCombinedObjectSmall(combinedObject)
        : createCombinedObjectLarge(combinedObject);
  }
 /**
  * Fetches the current S3 metadata for {@code target} and records it against a
  * StoredObject for that location, returning the updated stored object.
  */
 public StoredObject getObjectDetails(URI target) {
   String bucket = getS3Bucket(target);
   String key = getS3ObjectKey(target);
   ObjectMetadata metadata = s3Service.getObjectMetadata(bucket, key);
   StoredObject storedObject = new StoredObject(target);
   return updateStoredObject(storedObject.getLocation(), metadata);
 }