Example #1
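Writes a blob directly through the store, without an explicit version, and verifies the content can be read back immediately: closing the writer commits the write.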
 public void testAutocommit() throws Exception {
   Writer file = store.open("urn:test:file").openWriter();
   file.append("blob store test");
   file.close();
   CharSequence str = store.open("urn:test:file").getCharContent(true);
   assertEquals("blob store test", str.toString());
 }
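The store and dir fixture is not shown in these excerpts. A minimal sketch of what the tests appear to assume follows; the FileBlobStore constructor and the assertEmpty helper are illustrative assumptions, not confirmed API.

 private File dir;
 private BlobStore store;

 public void setUp() throws Exception {
   // scratch directory backing the store
   dir = File.createTempFile("blobstore", "test");
   dir.delete();
   dir.mkdirs();
   store = new FileBlobStore(dir); // hypothetical concrete store
 }

 private void assertEmpty(File dir) {
   // passes when nothing is left on disk
   String[] names = dir.list();
   assertTrue(names == null || names.length == 0);
 }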
Example #2
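Commits a version, then keeps writing through a committed version handle and reuses a previously used version URI; a final version confirms both blobs hold the committed content.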
 public void testReuseVersion() throws Exception {
   BlobVersion trx1 = store.newVersion("urn:test:trx1");
   Writer file1 = trx1.open("urn:test:file1").openWriter();
   file1.append("blob store test");
   file1.close();
   trx1.commit();
   BlobVersion trx2 = store.newVersion("urn:test:trx2");
   file1 = trx2.open("urn:test:file1").openWriter();
   file1.append("blob store test");
   file1.close();
   trx2.commit();
   Writer file2 = trx2.open("urn:test:file2").openWriter();
   file2.append("blob store test");
   file2.close();
   trx2.commit();
   trx2 = store.newVersion("urn:test:trx2");
   file2 = trx2.open("urn:test:file2").openWriter();
   file2.append("blob store test");
   file2.close();
   trx2.commit();
   BlobVersion trx3 = store.newVersion("urn:test:trx3");
   CharSequence str1 = trx3.open("urn:test:file1").getCharContent(true);
   assertEquals("blob store test", str1.toString());
   CharSequence str2 = trx3.open("urn:test:file2").getCharContent(true);
   assertEquals("blob store test", str2.toString());
 }
Example #3
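Commits a single blob through a version, erases the store, and verifies the backing directory is left empty.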
 public void testEraseSingleBlob() throws Exception {
   BlobVersion trx1 = store.newVersion("urn:test:trx1");
   Writer file = trx1.open("urn:test:file").openWriter();
   file.append("blob store test");
   file.close();
   trx1.commit();
   store.erase();
   assertEmpty(dir);
 }
Example #4
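Writes two successive revisions of the same blob via autocommit, then verifies that erase() removes all of them from disk.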
 public void testEraseMultiVersionSingleBlob() throws Exception {
   Writer file = store.open("urn:test:file").openWriter();
   file.append("blob store test1");
   file.close();
   file = store.open("urn:test:file").openWriter();
   file.append("blob store test2");
   file.close();
   store.erase();
   assertEmpty(dir);
 }
Example #5
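A basic round trip: commits a string under one version and reads it back from a later version.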
 public void testRoundTripString() throws Exception {
   BlobVersion trx1 = store.newVersion("urn:test:trx1");
   Writer file = trx1.open("urn:test:file").openWriter();
   file.append("blob store test");
   file.close();
   trx1.commit();
   BlobVersion trx2 = store.newVersion("urn:test:trx2");
   CharSequence str = trx2.open("urn:test:file").getCharContent(true);
   assertEquals("blob store test", str.toString());
 }
Example #6
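While a second writer is open but only flushed, readers still see the previously committed content; once that writer is closed, they see the new content.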
 public void testConcurrency() throws Exception {
   Writer test1 = store.open("urn:test:file").openWriter();
   test1.append("test1");
   test1.close();
   Writer test2 = store.open("urn:test:file").openWriter();
   test2.append("test2");
   test2.flush();
   assertEquals("test1", store.open("urn:test:file").getCharContent(true).toString());
   test2.close();
   assertEquals("test2", store.open("urn:test:file").getCharContent(true).toString());
 }
Example #7
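Recomputes derived hashes bottom-up: children are recursed first, then the directory's hash is calculated over its sorted children and the serialized triplets are stored as a blob under that hash. Subtrees without a dirty flag are skipped.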
 private void recalcHashes(DataNode item) throws IOException {
   if (item.dirty == null) {
     return; // not dirty, which means no children are dirty
   }
   // only directories have derived hashes
   if (item instanceof DirectoryNode) {
     DirectoryNode dirNode = (DirectoryNode) item;
     for (DataNode child : dirNode) {
       recalcHashes(child);
     }
     ByteArrayOutputStream bout = new ByteArrayOutputStream();
     hashCalc.sort(dirNode.getChildren());
     String newHash = hashCalc.calcHash(dirNode, bout);
     item.setHash(newHash);
     byte[] arrTriplets = bout.toByteArray();
     blobStore.setBlob(newHash, arrTriplets);
     log.info(
         "recalcHashes: "
             + item.name
             + " children:"
             + dirNode.members.size()
             + " hash="
             + newHash);
   }
 }
Example #8
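Two blobs written in an uncommitted version are invisible to a concurrent version; after commit(), a later version sees both at once.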
 public void testAtomicity() throws Exception {
   BlobVersion trx1 = store.newVersion("urn:test:trx1");
   Writer file1 = trx1.open("urn:test:file1").openWriter();
   file1.append("blob store test");
   file1.close();
   Writer file2 = trx1.open("urn:test:file2").openWriter();
   file2.append("blob store test");
   file2.close();
   BlobVersion trx2 = store.newVersion("urn:test:trx2");
   assertNull(trx2.open("urn:test:file1").getCharContent(true));
   assertNull(trx2.open("urn:test:file2").getCharContent(true));
   trx1.commit();
   BlobVersion trx3 = store.newVersion("urn:test:trx3");
   assertEquals("blob store test", trx3.open("urn:test:file1").getCharContent(true).toString());
   assertEquals("blob store test", trx3.open("urn:test:file2").getCharContent(true).toString());
 }
Example #9
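Opening a version URI that was never created must fail with IllegalArgumentException.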
 public void testReopenInvalid() throws Exception {
   try {
     store.openVersion("urn:test:nothing");
     fail();
   } catch (IllegalArgumentException e) {
     // pass
   }
 }
Example #10
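Gateway initialization: resolves the chunk size and compression flag from component settings, derives the base blob path from the cluster name, and locates the latest metadata index.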
 protected void initialize(
     BlobStore blobStore, ClusterName clusterName, @Nullable ByteSizeValue defaultChunkSize)
     throws IOException {
   this.blobStore = blobStore;
   this.chunkSize = componentSettings.getAsBytesSize("chunk_size", defaultChunkSize);
   this.basePath = BlobPath.cleanPath().add(clusterName.value());
   this.metaDataBlobContainer = blobStore.immutableBlobContainer(basePath.add("metadata"));
   this.currentIndex = findLatestIndex();
   this.compress = componentSettings.getAsBoolean("compress", true);
    logger.debug("Latest metadata found at index [{}]", currentIndex);
 }
Example #11
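Fetches the serialized triplets stored under a hash and parses them, returning null when the blob store has no entry for that hash.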
 public List<ITriplet> find(String hash) {
   // return DataItem.findByHash(hash, session);
   byte[] arr = blobStore.getBlob(hash);
   if (arr == null) {
     return null;
   }
   ByteArrayInputStream bin = new ByteArrayInputStream(arr);
   try {
     return hashCalc.parseTriplets(bin);
   } catch (IOException ex) {
     throw new RuntimeException(ex);
   }
 }
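Example #12
Shard gateway constructor: takes the chunk size, blob store, and shard path from the index-level gateway and builds the shard's immutable blob container.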
  protected BlobStoreIndexShardGateway(
      ShardId shardId,
      @IndexSettings Settings indexSettings,
      ThreadPool threadPool,
      IndexGateway indexGateway,
      IndexShard indexShard,
      Store store) {
    super(shardId, indexSettings);

    this.threadPool = threadPool;
    this.indexShard = (InternalIndexShard) indexShard;
    this.store = store;

    BlobStoreIndexGateway blobStoreIndexGateway = (BlobStoreIndexGateway) indexGateway;

    this.chunkSize = blobStoreIndexGateway.chunkSize(); // can be null -> no chunking
    this.blobStore = blobStoreIndexGateway.blobStore();
    this.shardPath = blobStoreIndexGateway.shardPath(shardId.id());

    this.blobContainer = blobStore.immutableBlobContainer(shardPath);

    this.recoveryStatus = new RecoveryStatus();
  }
Example #13
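Lists the shard's blobs, parses every non-empty commit-* blob into a commit point, and returns the most recent one, or null if none could be read.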
 public CommitPoint findCommitPoint(String index, int shardId) throws IOException {
   BlobPath path = BlobStoreIndexGateway.shardPath(basePath, index, shardId);
   ImmutableBlobContainer container = blobStore.immutableBlobContainer(path);
   ImmutableMap<String, BlobMetaData> blobs = container.listBlobs();
   List<CommitPoint> commitPointsList = Lists.newArrayList();
   for (BlobMetaData md : blobs.values()) {
     if (md.length() == 0) { // a commit point that was not flushed yet...
       continue;
     }
     if (md.name().startsWith("commit-")) {
       try {
         commitPointsList.add(CommitPoints.fromXContent(container.readBlobFully(md.name())));
       } catch (Exception e) {
         logger.warn("failed to read commit point at path {} with name [{}]", e, path, md.name());
       }
     }
   }
   CommitPoints commitPoints = new CommitPoints(commitPointsList);
   if (commitPoints.commits().isEmpty()) {
     return null;
   }
   return commitPoints.commits().get(0);
 }
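Example #14
When closed with delete set to true, removes the shard's blob path from the store.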
 @Override
 public void close(boolean delete) throws ElasticSearchException {
   if (delete) {
     blobStore.delete(shardPath);
   }
 }
Example #15
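Erasing an empty store must still leave the directory empty.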
 public void testEraseEmpty() throws Exception {
   store.erase();
   assertEmpty(dir);
 }
Example #16
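Test teardown: erases the store and deletes the scratch directory.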
 public void tearDown() throws Exception {
   store.erase();
   dir.delete();
 }
Example #17
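Snapshot isolation across threads: an uncommitted version is invisible to others, and a reader that opens the blob after prepare() blocks until commit() before it sees the committed content.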
 public void testIsolation() throws Exception {
   BlobVersion trx1 = store.newVersion("urn:test:trx1");
   Writer file1 = trx1.open("urn:test:file1").openWriter();
   file1.append("blob store test");
   file1.close();
   final CountDownLatch latch1 = new CountDownLatch(1);
   new Thread(
           new Runnable() {
             public void run() {
               try {
                 error = null;
                 try {
                   BlobVersion trx2 = store.newVersion("urn:test:trx2");
                   BlobObject blob = trx2.open("urn:test:file1");
                   assertNull(blob.getCharContent(true));
                 } catch (Exception e) {
                   e.printStackTrace();
                   fail();
                 } finally {
                   latch1.countDown();
                 }
               } catch (AssertionFailedError e) {
                 error = e;
               }
             }
           })
       .start();
   latch1.await();
   if (error != null) throw error;
   trx1.prepare();
   final CountDownLatch latch2 = new CountDownLatch(1);
   final CountDownLatch latch3 = new CountDownLatch(1);
   new Thread(
           new Runnable() {
             public void run() {
               try {
                 error = null;
                 try {
                   latch2.countDown();
                   BlobVersion trx3 = store.newVersion("urn:test:trx3");
                   BlobObject blob = trx3.open("urn:test:file1");
                   CharSequence str = blob.getCharContent(true);
                   assertNotNull(str);
                   assertEquals("blob store test", str.toString());
                 } catch (Exception e) {
                   e.printStackTrace();
                   fail();
                 } finally {
                   latch3.countDown();
                 }
               } catch (AssertionFailedError e) {
                 error = e;
               }
             }
           })
       .start();
   latch2.await();
   assertFalse(latch3.await(1, TimeUnit.SECONDS));
   trx1.commit();
   latch3.await();
   if (error != null) throw error;
 }
Example #18
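Content-defined chunking: a rolling Rsum checksum cuts the stream into blobs wherever the rolling value matches MASK (or MAX_BLOB_SIZE is reached); each blob is stored under its digest, blobs are grouped into chunk fanouts at FANOUT_MASK boundaries, and a final file fanout ties the chunk fanouts together.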
  /**
   * Returns a hex encoded SHA1 hash of the whole file, which can be used to locate the file's
   * bytes again.
   *
   * @param in the stream to read and chunk
   * @param hashStore receives the chunk and file fanout entries
   * @param blobStore receives the raw blob bytes
   * @return the hex encoded SHA1 hash of the whole file
   * @throws IOException if reading fails or the operation is cancelled
   */
  public String parse(InputStream in, HashStore hashStore, BlobStore blobStore) throws IOException {
    if (log.isInfoEnabled()) {
      log.info("parse. inputstream: " + in);
    }
    Rsum rsum = new Rsum(128);
    int numBlobs = 0;
    byte[] arr = new byte[1024];
    ByteArrayOutputStream bout = new ByteArrayOutputStream();

    List<String> blobHashes = new ArrayList<>();
    MessageDigest blobCrc = getCrypt();
    MessageDigest fanoutCrc = getCrypt();
    MessageDigest fileCrc = getCrypt();

    long fanoutLength = 0;
    long fileLength = 0;

    int s = in.read(arr, 0, 1024);
    if (log.isTraceEnabled()) {
      log.trace("initial block size: " + s);
    }

    List<String> fanoutHashes = new ArrayList<>();
    while (s >= 0) {
      numBytes += s;
      // log.trace("numBytes: {}", numBytes);
      if (cancelled) {
        throw new IOException("operation cancelled");
      }
      for (int i = 0; i < s; i++) {
        byte b = arr[i];
        rsum.roll(b);
        blobCrc.update(b);
        fanoutCrc.update(b);
        fileCrc.update(b);
        fanoutLength++;
        fileLength++;
        bout.write(b);
        int x = rsum.getValue();

        // System.out.println("x=" + x);
        // System.out.println("check mask: " + (x & MASK) + " == " + MASK);
        boolean limited;
        if (MAX_BLOB_SIZE != null) {
          limited = bout.size() > MAX_BLOB_SIZE;
          if (limited) {
            log.warn("HIT BLOB LIMIT: " + bout.size());
          }
        } else {
          limited = false;
        }
        if (((x & MASK) == MASK) || limited) {
          String blobCrcHex = toHex(blobCrc);
          byte[] blobBytes = bout.toByteArray();
          if (log.isInfoEnabled()) {
            log.info(
                "Store blob: "
                    + blobCrcHex
                    + " length="
                    + blobBytes.length
                    + " hash: "
                    + x
                    + " mask: "
                    + MASK);
          }
          blobStore.setBlob(blobCrcHex, blobBytes);
          bout.reset();
          blobHashes.add(blobCrcHex);
          blobCrc.reset();
          if ((x & FANOUT_MASK) == FANOUT_MASK) {
            String fanoutCrcVal = toHex(fanoutCrc);
            fanoutHashes.add(fanoutCrcVal);
            // log.info("set chunk fanout: {} length={}", fanoutCrcVal, fanoutLength);
            hashStore.setChunkFanout(fanoutCrcVal, blobHashes, fanoutLength);
            fanoutLength = 0;
            fanoutCrc.reset();
            blobHashes = new ArrayList<>();
          }
          numBlobs++;
          rsum.reset();
        }
      }

      s = in.read(arr, 0, 1024);
    }
    // Need to store terminal data, i.e. data which has been accumulated since the last boundary
    String blobCrcHex = toHex(blobCrc);
    // System.out.println("Store terminal blob: " + blobCrcHex);
    blobStore.setBlob(blobCrcHex, bout.toByteArray());
    numBlobs++;
    blobHashes.add(blobCrcHex);
    String fanoutCrcVal = toHex(fanoutCrc);
    // log.info("set terminal chunk fanout: {} length={}" ,fanoutCrcVal, fanoutLength);

    hashStore.setChunkFanout(fanoutCrcVal, blobHashes, fanoutLength);
    fanoutHashes.add(fanoutCrcVal);

    // Now store a fanout for the whole file. The contained hashes locate other fanouts
    String fileCrcVal = toHex(fileCrc);
    if (log.isInfoEnabled()) {
      log.info(
          "set file fanout: "
              + fileCrcVal
              + " length="
              + fileLength
              + " avg blob size="
              + fileLength / numBlobs);
    }
    hashStore.setFileFanout(fileCrcVal, fanoutHashes, fileLength);
    return fileCrcVal;
  }
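A plausible call site for parse(), assuming a parser instance plus hashStore and blobStore implementations are at hand; all three names are illustrative, not part of the excerpt.

 InputStream in = new FileInputStream("data.bin");
 try {
   // the returned hex SHA1 locates the file fanout; its hashes locate
   // chunk fanouts, whose hashes in turn locate the stored blobs
   String fileHash = parser.parse(in, hashStore, blobStore);
 } finally {
   in.close();
 }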
Example #19
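A directory exists if the blob store holds a blob under its hash.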
 public boolean dirExists(String hash) {
   return blobStore.hasBlob(hash);
 }
Example #20
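Resets the gateway by deleting everything under the root blob path.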
 @Override
 public void reset() throws Exception {
   blobStore.delete(BlobPath.cleanPath());
 }
Example #21
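Deletes all blobs for an index under the indices path of the base blob path.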
 @Override
 protected void delete(IndexMetaData indexMetaData) throws ElasticSearchException {
   BlobPath indexPath = basePath().add("indices").add(indexMetaData.index());
   blobStore.delete(indexPath);
 }