/** * Coalesces adjacent free entries to create larger free entries (so that the probability of * finding a free entry during allocation increases) */ private void mergeFreeEntries(List<FileEntry> entries) { long startTime = 0; if (trace) startTime = timeService.wallClockTime(); FileEntry lastEntry = null; FileEntry newEntry = null; int mergeCounter = 0; for (Iterator<FileEntry> it = entries.iterator(); it.hasNext(); ) { FileEntry fe = it.next(); if (fe.isLocked()) { continue; } // Merge any holes created (consecutive free entries) in the file if ((lastEntry != null) && (lastEntry.offset == (fe.offset + fe.size))) { if (newEntry == null) { newEntry = new FileEntry(fe.offset, fe.size + lastEntry.size); freeList.remove(lastEntry); mergeCounter++; } else { newEntry = new FileEntry(fe.offset, fe.size + newEntry.size); } freeList.remove(fe); mergeCounter++; } else { if (newEntry != null) { try { addNewFreeEntry(newEntry); if (trace) log.tracef( "Merged %d entries at %d:%d, %d free entries", mergeCounter, newEntry.offset, newEntry.size, freeList.size()); } catch (IOException e) { throw new PersistenceException("Could not add new merged entry", e); } newEntry = null; mergeCounter = 0; } } lastEntry = fe; } if (newEntry != null) { try { addNewFreeEntry(newEntry); if (trace) log.tracef( "Merged %d entries at %d:%d, %d free entries", mergeCounter, newEntry.offset, newEntry.size, freeList.size()); } catch (IOException e) { throw new PersistenceException("Could not add new merged entry", e); } } if (trace) log.tracef( "Total time taken for mergeFreeEntries: " + (timeService.wallClockTime() - startTime) + " (ms)"); }
/**
 * Looks up {@code key} and builds a {@link MarshalledEntry} from the store's single file.
 *
 * <p>Lock order matters here: the {@code resizeLock} read lock is taken first, then the
 * {@code entries} monitor, and the per-entry lock is acquired <em>before</em> the monitor is
 * released so the entry cannot be freed between lookup and read.
 *
 * @param key the cache key to look up
 * @param loadValue whether the serialized value bytes should be read from disk
 * @param loadMetadata whether the serialized metadata bytes should be read from disk
 * @return the marshalled entry, or {@code null} if absent or expired
 */
private MarshalledEntry<K, V> _load(Object key, boolean loadValue, boolean loadMetadata) {
  final FileEntry fe;
  resizeLock.readLock().lock();
  try {
    synchronized (entries) {
      // lookup FileEntry of the key
      fe = entries.get(key);
      if (fe == null) return null;

      // Entries are removed due to expiration from {@link SingleFileStore#purge}
      if (fe.isExpired(timeService.wallClockTime())) {
        return null;
      } else {
        // lock entry for reading before releasing entries monitor
        fe.lock();
      }
    }
  } finally {
    resizeLock.readLock().unlock();
  }

  org.infinispan.commons.io.ByteBuffer valueBb = null;
  org.infinispan.commons.io.ByteBuffer metadataBb = null;

  // If we only require the key, then no need to read disk
  if (!loadValue && !loadMetadata) {
    try {
      return ctx.getMarshalledEntryFactory().newMarshalledEntry(key, valueBb, metadataBb);
    } finally {
      fe.unlock();
    }
  }

  final byte[] data;
  try {
    // load serialized data from disk: key + value, plus metadata only when requested
    data = new byte[fe.keyLen + fe.dataLen + (loadMetadata ? fe.metadataLen : 0)];
    // The entry lock will prevent clear() from truncating the file at this point
    channel.read(ByteBuffer.wrap(data), fe.offset + KEY_POS);
  } catch (Exception e) {
    throw new PersistenceException(e);
  } finally {
    // No need to keep the lock for deserialization.
    // FileEntry is immutable, so its members can't be changed by another thread.
    fe.unlock();
  }

  if (trace) log.tracef("Read entry %s at %d:%d", key, fe.offset, fe.actualSize());

  // Slice the single on-disk buffer into key / value / metadata views.
  ByteBufferFactory factory = ctx.getByteBufferFactory();
  org.infinispan.commons.io.ByteBuffer keyBb = factory.newByteBuffer(data, 0, fe.keyLen);
  if (loadValue) {
    valueBb = factory.newByteBuffer(data, fe.keyLen, fe.dataLen);
  }
  if (loadMetadata && fe.metadataLen > 0) {
    metadataBb = factory.newByteBuffer(data, fe.keyLen + fe.dataLen, fe.metadataLen);
  }
  return ctx.getMarshalledEntryFactory().newMarshalledEntry(keyBb, valueBb, metadataBb);
}
@Override
public boolean containsKey(Object k) {
  InternalCacheEntry<K, V> entry = peek(k);
  if (entry == null) {
    return false;
  }
  // Expire lazily on read: a peeked entry that is already past its lifespan is
  // removed from the container and reported as absent.
  if (entry.canExpire() && entry.isExpired(timeService.wallClockTime())) {
    entries.remove(k);
    return false;
  }
  return true;
}
/**
 * Loads the entity stored under {@code key}, optionally with its stored metadata, inside a
 * dedicated read transaction. Returns {@code null} for invalid key types, missing entities,
 * or entries whose metadata says they have expired.
 */
@Override
public MarshalledEntry load(Object key) {
  if (!isValidKeyType(key)) {
    return null;
  }

  EntityManager em = emf.createEntityManager();
  try {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityFindBegin = timeService.time();
      Object entity = em.find(configuration.entityClass(), key);
      stats.addEntityFind(timeService.time() - entityFindBegin);
      try {
        if (entity == null) return null;

        InternalMetadata m = null;
        if (configuration.storeMetadata()) {
          // Metadata rows are keyed by the marshalled key bytes.
          byte[] keyBytes;
          try {
            keyBytes = marshaller.objectToByteBuffer(key);
          } catch (Exception e) {
            throw new JpaStoreException("Failed to marshall key", e);
          }
          long metadataFindBegin = timeService.time();
          MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes);
          stats.addMetadataFind(timeService.time() - metadataFindBegin);

          if (metadata != null && metadata.getMetadata() != null) {
            try {
              m = (InternalMetadata) marshaller.objectFromByteBuffer(metadata.getMetadata());
            } catch (Exception e) {
              throw new JpaStoreException("Failed to unmarshall metadata", e);
            }
            // Expired entries are treated as absent; actual removal happens in purge.
            if (m.isExpired(timeService.wallClockTime())) {
              return null;
            }
          }
        }
        if (trace) log.trace("Loaded " + entity + " (" + m + ")");
        return marshallerEntryFactory.newMarshalledEntry(key, entity, m);
      } finally {
        // Commit runs on every exit path above (including the early returns).
        try {
          txn.commit();
          stats.addReadTxCommitted(timeService.time() - txnBegin);
        } catch (Exception e) {
          stats.addReadTxFailed(timeService.time() - txnBegin);
          throw new JpaStoreException("Failed to load entry", e);
        }
      }
    } finally {
      // Roll back anything still active (e.g. commit failed or an exception skipped it).
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
/** Removes free entries towards the end of the file and truncates the file. */ private void truncateFile(List<FileEntry> entries) { long startTime = 0; if (trace) startTime = timeService.wallClockTime(); int reclaimedSpace = 0; int removedEntries = 0; long truncateOffset = -1; for (Iterator<FileEntry> it = entries.iterator(); it.hasNext(); ) { FileEntry fe = it.next(); // Till we have free entries at the end of the file, // we can remove them and contract the file to release disk // space. if (!fe.isLocked() && ((fe.offset + fe.size) == filePos)) { truncateOffset = fe.offset; filePos = fe.offset; freeList.remove(fe); it.remove(); reclaimedSpace += fe.size; removedEntries++; } else { break; } } if (truncateOffset > 0) { try { channel.truncate(truncateOffset); } catch (IOException e) { throw new PersistenceException("Error while truncating file", e); } } if (trace) { log.tracef("Removed entries: " + removedEntries + ", Reclaimed Space: " + reclaimedSpace); log.tracef( "Time taken for truncateFile: " + (timeService.wallClockTime() - startTime) + " (ms)"); } }
@Override
public InternalCacheEntry<K, V> get(Object k) {
  InternalCacheEntry<K, V> entry = entries.get(k);
  // Entries that cannot expire (and misses) are returned as-is.
  if (entry == null || !entry.canExpire()) {
    return entry;
  }
  long now = timeService.wallClockTime();
  if (entry.isExpired(now)) {
    // Delegate expiration handling for the already-expired entry to the manager.
    expirationManager.handleInMemoryExpiration(entry, now);
    return null;
  }
  // Live entry: refresh its access timestamp.
  entry.touch(now);
  return entry;
}
@Override
public InternalCacheEntry<K, V> remove(Object k) {
  // One-element array lets the compute lambda hand the previous mapping back out.
  final InternalCacheEntry<K, V>[] removed = new InternalCacheEntry[1];
  entries.compute(
      (K) k,
      (key, entry) -> {
        activator.onRemove(key, entry == null);
        removed[0] = entry;
        // Mapping to null removes the key from the container.
        return null;
      });
  InternalCacheEntry<K, V> previous = removed[0];
  if (trace) {
    log.tracef("Removed %s from container", previous);
  }
  if (previous == null) {
    return null;
  }
  // An entry that had already expired is reported to the caller as absent.
  if (previous.canExpire() && previous.isExpired(timeService.wallClockTime())) {
    return null;
  }
  return previous;
}
/**
 * Checks whether {@code key} has a live entry in the JPA store, inside a dedicated read
 * transaction. When metadata is stored, an entry counts as present only if it has no metadata
 * row or its expiration lies in the future.
 */
@Override
public boolean contains(Object key) {
  if (!isValidKeyType(key)) {
    return false;
  }

  EntityManager em = emf.createEntityManager();
  try {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityFindBegin = timeService.time();
      Object entity = em.find(configuration.entityClass(), key);
      stats.addEntityFind(timeService.time() - entityFindBegin);
      if (trace) log.trace("Entity " + key + " -> " + entity);
      try {
        if (entity == null) return false;

        if (configuration.storeMetadata()) {
          // Metadata rows are keyed by the marshalled key bytes.
          byte[] keyBytes;
          try {
            keyBytes = marshaller.objectToByteBuffer(key);
          } catch (Exception e) {
            throw new JpaStoreException("Cannot marshall key", e);
          }
          long metadataFindBegin = timeService.time();
          MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes);
          stats.addMetadataFind(timeService.time() - metadataFindBegin);
          if (trace) log.trace("Metadata " + key + " -> " + toString(metadata));
          // No metadata row means no expiration; otherwise the entry must not be past it.
          return metadata == null || metadata.expiration > timeService.wallClockTime();
        } else {
          return true;
        }
      } finally {
        // Commit runs on every exit path above (including the early returns).
        txn.commit();
        stats.addReadTxCommitted(timeService.time() - txnBegin);
      }
    } catch (RuntimeException e) {
      stats.addReadTxFailed(timeService.time() - txnBegin);
      throw e;
    } finally {
      // Roll back anything still active (e.g. commit failed).
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
/**
 * Removes expired entries in two phases: first unlinks them from the in-memory {@code entries}
 * map under its monitor, then frees their file space under the {@code resizeLock} read lock and
 * notifies the listener. Finishes with free-list housekeeping ({@code processFreeEntries}).
 */
@Override
public void purge(Executor threadPool, final PurgeListener task) {
  long now = timeService.wallClockTime();
  List<KeyValuePair<Object, FileEntry>> entriesToPurge =
      new ArrayList<KeyValuePair<Object, FileEntry>>();
  synchronized (entries) {
    // Phase 1: collect and unlink expired entries while holding the entries monitor.
    for (Iterator<Map.Entry<K, FileEntry>> it = entries.entrySet().iterator(); it.hasNext(); ) {
      Map.Entry<K, FileEntry> next = it.next();
      FileEntry fe = next.getValue();
      if (fe.isExpired(now)) {
        it.remove();
        entriesToPurge.add(new KeyValuePair<Object, FileEntry>(next.getKey(), fe));
      }
    }
  }

  resizeLock.readLock().lock();
  try {
    // Phase 2: release file space for the collected entries and notify the listener.
    for (Iterator<KeyValuePair<Object, FileEntry>> it = entriesToPurge.iterator();
        it.hasNext(); ) {
      KeyValuePair<Object, FileEntry> next = it.next();
      FileEntry fe = next.getValue();
      // NOTE(review): 'now' is fixed above, so this re-check matches phase 1's result for
      // every collected entry — it looks redundant; confirm against FileEntry semantics.
      if (fe.isExpired(now)) {
        it.remove();
        try {
          free(fe);
        } catch (Exception e) {
          throw new PersistenceException(e);
        }
        if (task != null) task.entryPurged(next.getKey());
      }
    }

    // Disk space optimizations
    synchronized (freeList) {
      processFreeEntries();
    }
  } finally {
    resizeLock.readLock().unlock();
  }
}
/**
 * Overrides the base implementation, which would go through {@link #load(Object)}: since every
 * key's {@code FileEntry} is kept in memory we can answer directly from the map.
 */
@Override
public boolean contains(Object key) {
  FileEntry entry = entries.get(key);
  if (entry == null) {
    return false;
  }
  return !entry.isExpired(timeService.wallClockTime());
}
/**
 * Purges expired entries from the JPA store. Expired metadata rows are selected with a criteria
 * query, then each candidate is re-checked and removed inside its own transaction; listener
 * notifications are dispatched through the completion service and awaited at the end.
 */
@Override
public void purge(Executor threadPool, final PurgeListener listener) {
  ExecutorAllCompletionService eacs = new ExecutorAllCompletionService(threadPool);
  EntityManager em = emf.createEntityManager();
  try {
    // Select all metadata rows whose expiration is at or before the current wall-clock time.
    CriteriaBuilder cb = em.getCriteriaBuilder();
    CriteriaQuery<MetadataEntity> cq = cb.createQuery(MetadataEntity.class);
    Root root = cq.from(MetadataEntity.class);
    long currentTime = timeService.wallClockTime();
    cq.where(cb.le(root.get(MetadataEntity.EXPIRATION), currentTime));

    for (MetadataEntity metadata : em.createQuery(cq).getResultList()) {
      EntityTransaction txn = em.getTransaction();
      final Object key;
      try {
        key = marshaller.objectFromByteBuffer(metadata.name);
      } catch (Exception e) {
        throw new JpaStoreException("Cannot unmarshall key", e);
      }
      long txnBegin = timeService.time();
      txn.begin();
      try {
        // Re-read the metadata inside the transaction so the expiration check is current.
        long metadataFindBegin = timeService.time();
        metadata = em.find(MetadataEntity.class, metadata.name);
        stats.addMetadataFind(timeService.time() - metadataFindBegin);
        // check for transaction - I hope write skew check is done here
        if (metadata.expiration > currentTime) {
          // Entry was refreshed since the query ran; skip it.
          txn.rollback();
          continue;
        }
        long entityFindBegin = timeService.time();
        Object entity = em.find(configuration.entityClass(), key);
        stats.addEntityFind(timeService.time() - entityFindBegin);
        if (entity != null) { // the entry may have been removed
          long entityRemoveBegin = timeService.time();
          em.remove(entity);
          stats.addEntityRemove(timeService.time() - entityRemoveBegin);
        }
        long metadataRemoveBegin = timeService.time();
        em.remove(metadata);
        stats.addMetadataRemove(timeService.time() - metadataRemoveBegin);
        txn.commit();
        stats.addRemoveTxCommitted(timeService.time() - txnBegin);

        if (trace) log.trace("Expired " + key + " -> " + entity + "(" + toString(metadata) + ")");

        // Notify the listener off the purge thread.
        if (listener != null) {
          eacs.submit(
              new Runnable() {
                @Override
                public void run() {
                  listener.entryPurged(key);
                }
              },
              null);
        }
      } catch (RuntimeException e) {
        stats.addRemoveTxFailed(timeService.time() - txnBegin);
        throw e;
      } finally {
        // Roll back anything still active (e.g. an exception skipped the commit).
        if (txn != null && txn.isActive()) {
          txn.rollback();
        }
      }
    }
  } finally {
    em.close();
  }
  // Wait for all listener notifications before reporting completion or failure.
  eacs.waitUntilAllCompleted();
  if (eacs.isExceptionThrown()) {
    throw new JpaStoreException(eacs.getFirstException());
  }
}
/**
 * Iterates all stored keys and hands each (optionally with value and metadata) to {@code task}
 * via the completion service. Keys are fetched first with a criteria query over the id
 * attribute; each entity/metadata read runs in its own transaction.
 */
@Override
public void process(
    KeyFilter filter,
    final CacheLoaderTask task,
    Executor executor,
    boolean fetchValue,
    final boolean fetchMetadata) {
  ExecutorAllCompletionService eacs = new ExecutorAllCompletionService(executor);
  final TaskContextImpl taskContext = new TaskContextImpl();
  EntityManager em = emf.createEntityManager();
  try {
    // Query only the id attribute of the configured entity class.
    CriteriaBuilder cb = em.getCriteriaBuilder();
    CriteriaQuery cq = cb.createQuery();
    Root root = cq.from(configuration.entityClass());
    Type idType = root.getModel().getIdType();
    SingularAttribute idAttr = root.getModel().getId(idType.getJavaType());
    cq.select(root.get(idAttr));

    for (final Object key : em.createQuery(cq).getResultList()) {
      // A task may request early termination via the context.
      if (taskContext.isStopped()) break;
      if (filter != null && !filter.shouldLoadKey(key)) {
        if (trace) log.trace("Key " + key + " filtered");
        continue;
      }

      EntityTransaction txn = em.getTransaction();

      Object tempEntity = null;
      InternalMetadata tempMetadata = null;
      boolean loaded = false;
      txn.begin();
      try {
        // NOTE(review): this loop retries until the commit succeeds, but txn.begin() is only
        // called once outside it — a failed commit likely leaves the txn unusable on retry;
        // confirm intended behavior.
        do {
          try {
            tempEntity = fetchValue ? em.find(configuration.entityClass(), key) : null;
            tempMetadata = fetchMetadata ? getMetadata(em, key) : null;
          } finally {
            try {
              txn.commit();
              loaded = true;
            } catch (Exception e) {
              log.trace("Failed to load once", e);
            }
          }
        } while (!loaded);
      } finally {
        if (txn != null && txn.isActive()) txn.rollback();
      }
      // Effectively-final copies for capture by the Callable below.
      final Object entity = tempEntity;
      final InternalMetadata metadata = tempMetadata;
      if (trace) log.trace("Processing " + key + " -> " + entity + "(" + metadata + ")");

      // Skip entries whose metadata says they have already expired.
      if (metadata != null && metadata.isExpired(timeService.wallClockTime())) continue;

      eacs.submit(
          new Callable<Void>() {
            @Override
            public Void call() throws Exception {
              try {
                final MarshalledEntry marshalledEntry =
                    marshallerEntryFactory.newMarshalledEntry(key, entity, metadata);
                if (marshalledEntry != null) {
                  task.processEntry(marshalledEntry, taskContext);
                }
                return null;
              } catch (Exception e) {
                log.errorExecutingParallelStoreTask(e);
                throw e;
              }
            }
          });
    }
    // Wait for all submitted tasks and surface the first failure, if any.
    eacs.waitUntilAllCompleted();
    if (eacs.isExceptionThrown()) {
      throw new org.infinispan.persistence.spi.PersistenceException(
          "Execution exception!", eacs.getFirstException());
    }
  } finally {
    em.close();
  }
}