/**
  * {@link InfinispanDirectory#deleteFile(String)} does not delete the elements from the cache
  * but only flags the file as deletable. This method really removes the elements from the
  * cache; it should be invoked only by {@link #deleteOrReleaseReadLock(String)} after having
  * verified that no users are left needing to read these chunks (see the caller sketch below).
  *
  * @param readLockKey the key representing the values to be deleted
  * @param locksCache the cache containing the locks
  * @param chunksCache the cache containing the chunks to be deleted
  * @param metadataCache the cache containing the metadata of the elements to be deleted
  */
 static void realFileDelete(
     FileReadLockKey readLockKey,
     AdvancedCache<Object, Integer> locksCache,
     AdvancedCache<?, ?> chunksCache,
     AdvancedCache<?, ?> metadataCache) {
   final boolean trace = log.isTraceEnabled();
   final String indexName = readLockKey.getIndexName();
   final String filename = readLockKey.getFileName();
   FileCacheKey key = new FileCacheKey(indexName, filename);
   if (trace) log.tracef("deleting metadata: %s", key);
   FileMetadata file = (FileMetadata) metadataCache.remove(key);
    // During index optimization the same file could be deleted twice, so a null is possible here
    if (file != null) {
     for (int i = 0; i < file.getNumberOfChunks(); i++) {
       ChunkCacheKey chunkKey = new ChunkCacheKey(indexName, filename, i);
       if (trace) log.tracef("deleting chunk: %s", chunkKey);
       chunksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP, Flag.SKIP_CACHE_LOAD).removeAsync(chunkKey);
     }
   }
    // Remove the read lock last: while its value is still 0, the entry prevents others from
    // using the file during the deletion process.
   if (trace) log.tracef("deleting readlock: %s", readLockKey);
   locksCache.withFlags(Flag.SKIP_REMOTE_LOOKUP).removeAsync(readLockKey);
 }
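
  // For context, a caller sketch (illustrative only, not the actual Infinispan implementation):
  // deleteOrReleaseReadLock is expected to decrement the shared reader counter and invoke
  // realFileDelete only once no readers remain. The field names and the counter update below
  // are assumptions made for this example.
  void deleteOrReleaseReadLockSketch(FileReadLockKey readLockKey) {
    // Hypothetical decrement of the reader count; the real locker performs its own atomic update.
    Integer remainingReaders = locksCache.computeIfPresent(readLockKey, (k, v) -> v - 1);
    if (remainingReaders == null || remainingReaders <= 0) {
      // No readers left: it is now safe to physically remove metadata, chunks and the lock key.
      realFileDelete(readLockKey, locksCache, chunksCache, metadataCache);
    }
  }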
 /** Load implementation for FileCacheKey: must return the metadata of the requested file. */
 private FileMetadata loadIntern(final FileCacheKey key) throws IOException {
   final String fileName = key.getFileName();
   final long fileLength = directory.fileLength(fileName);
    // The buffer size defaults to the full file size, but is capped at autoChunkSize so that
    // large segments still get chunked:
   final int bufferSize = (int) Math.min(fileLength, (long) autoChunkSize);
   final FileMetadata meta = new FileMetadata(bufferSize);
   meta.setSize(fileLength);
   return meta;
 }
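
  // A quick worked example of the clamp above (made-up sizes, illustrative only):
  // small files keep their own length as buffer size, larger files are capped and chunked.
  static int exampleBufferSize(long fileLength, int autoChunkSize) {
    // exampleBufferSize(200L * 1024, 1024 * 1024)      -> 204_800   (whole file fits one chunk)
    // exampleBufferSize(3L * 1024 * 1024, 1024 * 1024) -> 1_048_576 (capped, file gets chunked)
    return (int) Math.min(fileLength, (long) autoChunkSize);
  }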
Example No. 3
  @Test
  public void testMultiThreaded() {
    final Cache<Object, Object> metadata = cacheManager.getCache("metadata");
    final Cache<Object, Object> chunks = cacheManager.getCache("chunks");
    final Cache<Object, Integer> locks = cacheManager.getCache("locks");

    FileMetadata fileMetadata = new FileMetadata(10);
    fileMetadata.setSize(11); // Make it chunked, otherwise no read lock will be involved
    metadata.put(new FileCacheKey("indexName", "fileName", -1), fileMetadata);
    final LocalLockMergingSegmentReadLocker locker =
        new LocalLockMergingSegmentReadLocker(locks, chunks, metadata, "indexName", -1);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    final ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);

    Runnable stressor =
        new Runnable() {
          @Override
          public void run() {
            try {
              int counter = 0;
              while (!testFailed.get()) {

                locker.acquireReadLock("fileName");
                Thread.sleep(2);
                locker.deleteOrReleaseReadLock("fileName");

                // Take a break every now and again to try to avoid the same LocalReadLock being
                // used constantly
                if (counter++ % 900 == 0) {
                  System.out.print(".");
                  Thread.sleep(7);
                }
                if (metadata.get(new FileCacheKey("indexName", "fileName", -1)) == null) {
                  // Shouldn't have been deleted!
                  testFailed.set(true);
                  System.out.print("X");
                }
              }
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
          }
        };
    for (int i = 0; i < NUM_THREADS; i++) {
      exec.execute(stressor);
    }
    System.out.println("Stressor threads started...");
    exec.shutdown();
    try {
      exec.awaitTermination(TEST_MINUTES_MAX, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
      exec.shutdownNow();
    }
    AssertJUnit.assertFalse(testFailed.get());
  }
Example No. 4
  public void testAutoChunkingOnLargeFiles() throws IOException {
    Directory mockDirectory = createMockDirectory();

    FileCacheKey k = new FileCacheKey(INDEX_NAME, FILE_NAME, segmentId);
    DirectoryLoaderAdaptor adaptor =
        new DirectoryLoaderAdaptor(mockDirectory, INDEX_NAME, AUTO_BUFFER, -1);
    Object loaded = adaptor.load(k);
    AssertJUnit.assertTrue(loaded instanceof FileMetadata);
    FileMetadata metadata = (FileMetadata) loaded;
    AssertJUnit.assertEquals(TEST_SIZE, metadata.getSize());
    AssertJUnit.assertEquals(AUTO_BUFFER, metadata.getBufferSize());
  }
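
  // Tying the two tests together (illustrative arithmetic only; TEST_SIZE and AUTO_BUFFER are
  // constants defined elsewhere in the test class): a segment larger than the auto buffer is
  // stored as several chunk entries, which is what makes the read-lock handling above necessary.
  static long exampleChunkCount() {
    long testSize = 10L * 1024 * 1024;                              // e.g. a 10 MB segment
    int autoBuffer = 1024 * 1024;                                   // e.g. a 1 MB auto buffer
    int bufferSize = (int) Math.min(testSize, (long) autoBuffer);   // capped at 1 MB, as asserted above
    return (testSize + bufferSize - 1) / bufferSize;                // about 10 chunk cache entries
  }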