public void cleanupTimedOutTransactions() { if (trace) log.tracef( "About to cleanup remote transactions older than %d ms", configuration.transaction().completedTxTimeout()); long beginning = timeService.time(); long cutoffCreationTime = beginning - TimeUnit.MILLISECONDS.toNanos(configuration.transaction().completedTxTimeout()); List<GlobalTransaction> toKill = new ArrayList<>(); // Check remote transactions. for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) { GlobalTransaction gtx = e.getKey(); RemoteTransaction remoteTx = e.getValue(); if (remoteTx != null) { if (trace) log.tracef("Checking transaction %s", gtx); // Check the time. if (remoteTx.getCreationTime() - cutoffCreationTime < 0) { long duration = timeService.timeDuration( remoteTx.getCreationTime(), beginning, TimeUnit.MILLISECONDS); log.remoteTransactionTimeout(gtx, duration); toKill.add(gtx); } } } // Rollback the orphaned transactions and release any held locks. for (GlobalTransaction gtx : toKill) { killTransaction(gtx); } }
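// Illustrative sketch (not from the original source, plain JDK only): the timeout check above
// relies on the overflow-safe idiom for System.nanoTime()-style clocks -- compare differences,
// never raw values. The same cutoff logic in a standalone form:
import java.util.concurrent.TimeUnit;

class NanoTimeoutCheck {
   static boolean olderThan(long creationNanos, long nowNanos, long timeoutMillis) {
      long cutoff = nowNanos - TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
      // creationNanos - cutoff < 0 stays correct even if the nano clock wraps around Long.MAX_VALUE
      return creationNanos - cutoff < 0;
   }

   public static void main(String[] args) throws InterruptedException {
      long created = System.nanoTime();
      Thread.sleep(20);
      System.out.println(olderThan(created, System.nanoTime(), 10));     // true: older than 10 ms
      System.out.println(olderThan(created, System.nanoTime(), 60_000)); // false: younger than 1 min
   }
}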
public void cleanupCompletedTransactions() { if (!completedTransactions.isEmpty()) { try { log.tracef( "About to cleanup completed transactions. Initial size is %d", completedTransactions.size()); // this iterator is weakly consistent and will never throw ConcurrentModificationException Iterator<Map.Entry<GlobalTransaction, Long>> iterator = completedTransactions.entrySet().iterator(); long timeout = configuration.transaction().completedTxTimeout(); int removedEntries = 0; long beginning = timeService.time(); while (iterator.hasNext()) { Map.Entry<GlobalTransaction, Long> e = iterator.next(); long ageMillis = timeService.timeDuration(e.getValue(), TimeUnit.MILLISECONDS); if (ageMillis >= timeout) { iterator.remove(); removedEntries++; } } long duration = timeService.timeDuration(beginning, TimeUnit.MILLISECONDS); log.tracef( "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, " + "current number of completed transactions is %d", removedEntries, duration, completedTransactions.size()); } catch (Exception e) { log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage()); } } }
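// Illustrative sketch (assumption: completedTransactions is backed by a ConcurrentHashMap, as the
// comment above implies). A weakly consistent entry-set iterator tolerates concurrent updates and
// supports remove() without ConcurrentModificationException; the same cleanup pattern standalone:
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class WeaklyConsistentCleanup {
   public static void main(String[] args) {
      Map<String, Long> completed = new ConcurrentHashMap<String, Long>();
      long now = System.nanoTime();
      completed.put("tx-1", now - 5_000_000_000L); // completed five seconds ago
      completed.put("tx-2", now);                  // just completed
      long timeoutNanos = 1_000_000_000L;          // keep completed transactions for one second

      Iterator<Map.Entry<String, Long>> iterator = completed.entrySet().iterator();
      while (iterator.hasNext()) {
         Map.Entry<String, Long> e = iterator.next();
         if (now - e.getValue() >= timeoutNanos) {
            iterator.remove(); // safe on a weakly consistent iterator
         }
      }
      System.out.println(completed.keySet()); // prints [tx-2]
   }
}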
private CompletableFuture<Void> visitSecondPhaseCommand( TxInvocationContext ctx, TransactionBoundaryCommand command, boolean commit, ExtendedStatistic duration, ExtendedStatistic counter) throws Throwable { GlobalTransaction globalTransaction = command.getGlobalTransaction(); if (trace) { log.tracef( "Visit 2nd phase command %s. Is it local? %s. Transaction is %s", command, ctx.isOriginLocal(), globalTransaction.globalId()); } long start = timeService.time(); return ctx.onReturn( (rCtx, rCommand, rv, throwable) -> { if (throwable != null) { throw throwable; } long end = timeService.time(); updateTime(duration, counter, start, end, globalTransaction, rCtx.isOriginLocal()); cacheStatisticManager.setTransactionOutcome( commit, globalTransaction, rCtx.isOriginLocal()); cacheStatisticManager.terminateTransaction(globalTransaction, true, true); return null; }); }
public synchronized RspList<Object> getResponseList() throws Exception { long expectedEndTime = timeService.expectedEndTime(timeout, MILLISECONDS); long waitingTime; while (expectedResponses > 0 && retval == null && (waitingTime = timeService.remainingTime(expectedEndTime, MILLISECONDS)) > 0) { try { this.wait(waitingTime); } catch (InterruptedException e) { // reset interruption flag Thread.currentThread().interrupt(); expectedResponses = -1; } } // Now we either have the response we need or aren't expecting any more responses - or have // run out of time. if (retval != null) return retval; else if (exception != null) throw exception; else if (expectedResponses == 0) throw new RpcException( format( "No more valid responses. Received invalid responses from all of %s", futures.values())); else throw new TimeoutException( format( "Timed out waiting for %s for valid responses from any of %s.", Util.prettyPrintTime(timeout), futures.values())); }
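// Illustrative sketch (standalone, simplified): the wait loop above is the standard
// "wait() in a loop against a deadline" idiom. It recomputes the remaining time before every
// wait(), so spurious wakeups and partial notifications are handled correctly:
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class DeadlineWait {
   private Object result; // set by another thread, guarded by 'this'

   synchronized Object await(long timeoutMillis) throws InterruptedException, TimeoutException {
      long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
      long remaining;
      while (result == null
            && (remaining = TimeUnit.NANOSECONDS.toMillis(deadline - System.nanoTime())) > 0) {
         wait(remaining); // may return early; the loop re-checks the condition and the deadline
      }
      if (result == null) {
         throw new TimeoutException("No result after " + timeoutMillis + " ms");
      }
      return result;
   }

   synchronized void complete(Object value) {
      result = value;
      notifyAll();
   }
}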
/** * Coalesces adjacent free entries to create larger free entries (so that the probability of * finding a free entry during allocation increases) */ private void mergeFreeEntries(List<FileEntry> entries) { long startTime = 0; if (trace) startTime = timeService.wallClockTime(); FileEntry lastEntry = null; FileEntry newEntry = null; int mergeCounter = 0; for (Iterator<FileEntry> it = entries.iterator(); it.hasNext(); ) { FileEntry fe = it.next(); if (fe.isLocked()) { continue; } // Merge any holes created (consecutive free entries) in the file if ((lastEntry != null) && (lastEntry.offset == (fe.offset + fe.size))) { if (newEntry == null) { newEntry = new FileEntry(fe.offset, fe.size + lastEntry.size); freeList.remove(lastEntry); mergeCounter++; } else { newEntry = new FileEntry(fe.offset, fe.size + newEntry.size); } freeList.remove(fe); mergeCounter++; } else { if (newEntry != null) { try { addNewFreeEntry(newEntry); if (trace) log.tracef( "Merged %d entries at %d:%d, %d free entries", mergeCounter, newEntry.offset, newEntry.size, freeList.size()); } catch (IOException e) { throw new PersistenceException("Could not add new merged entry", e); } newEntry = null; mergeCounter = 0; } } lastEntry = fe; } if (newEntry != null) { try { addNewFreeEntry(newEntry); if (trace) log.tracef( "Merged %d entries at %d:%d, %d free entries", mergeCounter, newEntry.offset, newEntry.size, freeList.size()); } catch (IOException e) { throw new PersistenceException("Could not add new merged entry", e); } } if (trace) log.tracef( "Total time taken for mergeFreeEntries: " + (timeService.wallClockTime() - startTime) + " (ms)"); }
@Override public <KOut, VOut> Map<KOut, VOut> reduce(ReduceCommand<KOut, VOut> reduceCommand) throws InterruptedException { Cache<?, ?> cache = cacheManager.getCache(reduceCommand.getCacheName()); Set<KOut> keys = reduceCommand.getKeys(); String taskId = reduceCommand.getTaskId(); Reducer<KOut, VOut> reducer = reduceCommand.getReducer(); boolean useIntermediateKeys = reduceCommand.isEmitCompositeIntermediateKeys(); boolean noInputKeys = keys == null || keys.isEmpty(); Cache<Object, List<VOut>> tmpCache = cacheManager.getCache(reduceCommand.getCacheName()); Map<KOut, VOut> result = new HashMap<KOut, VOut>(); if (noInputKeys) { // illegal state, raise exception throw new IllegalStateException( "Reduce phase of MapReduceTask " + taskId + " on node " + cdl.getAddress() + " executed with empty input keys"); } else { // first hook into lifecycle MapReduceTaskLifecycleService taskLifecycleService = MapReduceTaskLifecycleService.getInstance(); log.tracef("For m/r task %s invoking %s at %s", taskId, reduceCommand, cdl.getAddress()); int interruptCount = 0; long start = log.isTraceEnabled() ? timeService.time() : 0; try { taskLifecycleService.onPreExecute(reducer, cache); for (KOut key : keys) { if (checkInterrupt(interruptCount++) && Thread.currentThread().isInterrupted()) throw new InterruptedException(); // load result value from map phase List<VOut> value; if (useIntermediateKeys) { value = tmpCache.get(new IntermediateCompositeKey<KOut>(taskId, key)); } else { value = tmpCache.get(key); } // and reduce it VOut reduced = reducer.reduce(key, value.iterator()); result.put(key, reduced); log.tracef( "For m/r task %s reduced %s to %s at %s ", taskId, key, reduced, cdl.getAddress()); } } finally { if (log.isTraceEnabled()) { log.tracef( "Reduce for task %s took %s milliseconds", reduceCommand.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS)); } taskLifecycleService.onPostExecute(reducer); } } return result; }
private static RspList<Object> processCalls( Map<Address, ReplicableCommand> commands, long timeout, ResponseMode mode, Marshaller marshaller, CommandAwareRpcDispatcher card, boolean oob, boolean ignoreLeavers) throws Exception { if (trace) log.tracef("Replication task sending %s with response mode %s", commands, mode); if (commands.isEmpty()) return new RspList<>(); RequestOptions opts = new RequestOptions(mode, timeout); // opts.setExclusionList(card.getChannel().getAddress()); Map<Address, Future<Object>> futures = new HashMap<Address, Future<Object>>(commands.size()); RspList<Object> retval = new RspList<>(); for (Map.Entry<Address, ReplicableCommand> cmd : commands.entrySet()) { Buffer buf = marshallCall(marshaller, cmd.getValue()); Address dest = cmd.getKey(); boolean rsvp = isRsvpCommand(cmd.getValue()); futures.put( dest, card.sendMessageWithFuture(constructMessage(buf, dest, oob, mode, rsvp, false), opts)); } // a get() on each future will block till that call completes. TimeService timeService = card.timeService; long waitTime = timeService.expectedEndTime(timeout, MILLISECONDS); for (Map.Entry<Address, Future<Object>> entry : futures.entrySet()) { Address target = entry.getKey(); try { retval.addRsp( target, entry.getValue().get(timeService.remainingTime(waitTime, MILLISECONDS), MILLISECONDS)); } catch (java.util.concurrent.TimeoutException te) { throw new TimeoutException( formatString( "Timed out after %s waiting for a response from %s", prettyPrintTime(timeout), target)); } catch (ExecutionException e) { if (ignoreLeavers && e.getCause() instanceof SuspectedException) { retval.addRsp(target, new ExceptionResponse((SuspectedException) e.getCause())); } else { throw wrapThrowableInException(e.getCause()); } } } return retval; }
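// Illustrative sketch (standalone; plain java.util.concurrent futures rather than JGroups ones):
// the loop above charges every get() against one shared deadline instead of granting each future
// its own full timeout. The same budget-splitting pattern:
import java.util.List;
import java.util.concurrent.*;

class SharedDeadlineGets {
   static void drain(List<Future<String>> futures, long timeoutMillis)
         throws InterruptedException, ExecutionException, TimeoutException {
      long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
      for (Future<String> future : futures) {
         long remaining = deadline - System.nanoTime(); // shrinks as earlier gets consume the budget
         String value = future.get(Math.max(remaining, 0), TimeUnit.NANOSECONDS);
         System.out.println("response: " + value);
      }
   }

   public static void main(String[] args) throws Exception {
      ExecutorService pool = Executors.newFixedThreadPool(2);
      List<Future<String>> futures =
            List.of(pool.submit(() -> "rsp-1"), pool.submit(() -> "rsp-2"));
      drain(futures, 500);
      pool.shutdown();
   }
}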
private void waitForView(int viewId, long timeout) throws InterruptedException { if (this.viewId < viewId) { log.tracef( "Received a cache topology command with a higher view id: %s, our view id is %s", viewId, this.viewId); } long endTime = timeService.expectedEndTime(timeout, TimeUnit.MILLISECONDS); synchronized (viewUpdateLock) { while (this.viewId < viewId && !timeService.isTimeExpired(endTime)) { viewUpdateLock.wait(1000); } } if (this.viewId < viewId) { throw new TimeoutException("Timed out waiting for view " + viewId); } }
@Override public MarshalledEntry load(Object key) { if (!isValidKeyType(key)) { return null; } EntityManager em = emf.createEntityManager(); try { EntityTransaction txn = em.getTransaction(); long txnBegin = timeService.time(); txn.begin(); try { long entityFindBegin = timeService.time(); Object entity = em.find(configuration.entityClass(), key); stats.addEntityFind(timeService.time() - entityFindBegin); try { if (entity == null) return null; InternalMetadata m = null; if (configuration.storeMetadata()) { byte[] keyBytes; try { keyBytes = marshaller.objectToByteBuffer(key); } catch (Exception e) { throw new JpaStoreException("Failed to marshall key", e); } long metadataFindBegin = timeService.time(); MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes); stats.addMetadataFind(timeService.time() - metadataFindBegin); if (metadata != null && metadata.getMetadata() != null) { try { m = (InternalMetadata) marshaller.objectFromByteBuffer(metadata.getMetadata()); } catch (Exception e) { throw new JpaStoreException("Failed to unmarshall metadata", e); } if (m.isExpired(timeService.wallClockTime())) { return null; } } } if (trace) log.trace("Loaded " + entity + " (" + m + ")"); return marshallerEntryFactory.newMarshalledEntry(key, entity, m); } finally { try { txn.commit(); stats.addReadTxCommitted(timeService.time() - txnBegin); } catch (Exception e) { stats.addReadTxFailed(timeService.time() - txnBegin); throw new JpaStoreException("Failed to load entry", e); } } } finally { if (txn != null && txn.isActive()) txn.rollback(); } } finally { em.close(); } }
@Override public void setStatisticsEnabled(boolean enabled) { this.statisticsEnabled = enabled; if (enabled) { // yes technically we do not reset stats but we initialize them resetNanoseconds.set(ts.time()); } }
@Override public CompletableFuture<Void> visitGetAllCommand(InvocationContext ctx, GetAllCommand command) throws Throwable { if (trace) { log.tracef( "Visit Get All Command %s. Is it in transaction scope? %s. Is it local? %s", command, ctx.isInTxScope(), ctx.isOriginLocal()); } if (!ctx.isInTxScope()) { return ctx.continueInvocation(); } long start = timeService.time(); return ctx.onReturn( (rCtx, rCommand, rv, throwable) -> { if (throwable != null) { throw throwable; } long end = timeService.time(); initStatsIfNecessary(rCtx); int numRemote = 0; Collection<?> keys = ((GetAllCommand) rCommand).getKeys(); for (Object key : keys) { if (isRemote(key)) numRemote++; } // TODO: tbh this seems like it doesn't work properly for statistics as each // one will have the duration of all the time for all gets... Maybe do an average // instead ? Either way this isn't very indicative if (numRemote > 0) { cacheStatisticManager.add( NUM_REMOTE_GET, numRemote, getGlobalTransaction(rCtx), rCtx.isOriginLocal()); cacheStatisticManager.add( REMOTE_GET_EXECUTION, timeService.timeDuration(start, end, NANOSECONDS), getGlobalTransaction(rCtx), rCtx.isOriginLocal()); } cacheStatisticManager.add( ALL_GET_EXECUTION, timeService.timeDuration(start, end, NANOSECONDS), getGlobalTransaction(rCtx), rCtx.isOriginLocal()); cacheStatisticManager.add( NUM_GET, keys.size(), getGlobalTransaction(rCtx), rCtx.isOriginLocal()); return null; }); }
private <KIn, VIn, KOut, VOut> Map<KOut, List<VOut>> combineForLocalReduction( MapCombineCommand<KIn, VIn, KOut, VOut> mcc, CollectableCollector<KOut, VOut> collector) { String taskId = mcc.getTaskId(); Reducer<KOut, VOut> combiner = mcc.getCombiner(); Map<KOut, List<VOut>> result = null; if (combiner != null) { result = new HashMap<KOut, List<VOut>>(); log.tracef("For m/r task %s invoking combiner %s at %s", taskId, mcc, cdl.getAddress()); MapReduceTaskLifecycleService taskLifecycleService = MapReduceTaskLifecycleService.getInstance(); long start = log.isTraceEnabled() ? timeService.time() : 0; try { Cache<?, ?> cache = cacheManager.getCache(mcc.getCacheName()); taskLifecycleService.onPreExecute(combiner, cache); Map<KOut, List<VOut>> collectedValues = collector.collectedValues(); for (Entry<KOut, List<VOut>> e : collectedValues.entrySet()) { VOut combined; List<VOut> list = e.getValue(); List<VOut> l = new LinkedList<VOut>(); if (list.size() > 1) { combined = combiner.reduce(e.getKey(), list.iterator()); } else { combined = list.get(0); } l.add(combined); result.put(e.getKey(), l); log.tracef( "For m/r task %s combined %s to %s at %s", taskId, e.getKey(), combined, cdl.getAddress()); } } finally { if (log.isTraceEnabled()) { log.tracef( "Combine for task %s took %s milliseconds", mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS)); } taskLifecycleService.onPostExecute(combiner); } } else { // Combiner not specified result = collector.collectedValues(); } return result; }
private MarshalledEntry<K, V> _load(Object key, boolean loadValue, boolean loadMetadata) { final FileEntry fe; resizeLock.readLock().lock(); try { synchronized (entries) { // lookup FileEntry of the key fe = entries.get(key); if (fe == null) return null; // Entries are removed due to expiration from {@link SingleFileStore#purge} if (fe.isExpired(timeService.wallClockTime())) { return null; } else { // lock entry for reading before releasing entries monitor fe.lock(); } } } finally { resizeLock.readLock().unlock(); } org.infinispan.commons.io.ByteBuffer valueBb = null; org.infinispan.commons.io.ByteBuffer metadataBb = null; // If we only require the key, then no need to read disk if (!loadValue && !loadMetadata) { try { return ctx.getMarshalledEntryFactory().newMarshalledEntry(key, valueBb, metadataBb); } finally { fe.unlock(); } } final byte[] data; try { // load serialized data from disk data = new byte[fe.keyLen + fe.dataLen + (loadMetadata ? fe.metadataLen : 0)]; // The entry lock will prevent clear() from truncating the file at this point channel.read(ByteBuffer.wrap(data), fe.offset + KEY_POS); } catch (Exception e) { throw new PersistenceException(e); } finally { // No need to keep the lock for deserialization. // FileEntry is immutable, so its members can't be changed by another thread. fe.unlock(); } if (trace) log.tracef("Read entry %s at %d:%d", key, fe.offset, fe.actualSize()); ByteBufferFactory factory = ctx.getByteBufferFactory(); org.infinispan.commons.io.ByteBuffer keyBb = factory.newByteBuffer(data, 0, fe.keyLen); if (loadValue) { valueBb = factory.newByteBuffer(data, fe.keyLen, fe.dataLen); } if (loadMetadata && fe.metadataLen > 0) { metadataBb = factory.newByteBuffer(data, fe.keyLen + fe.dataLen, fe.metadataLen); } return ctx.getMarshalledEntryFactory().newMarshalledEntry(keyBb, valueBb, metadataBb); }
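// Illustrative sketch (standalone, hypothetical layout): _load() reads key, value and metadata
// with a single channel.read() and then slices the resulting byte[] using the lengths stored in
// the FileEntry. The same slicing arithmetic on a plain array:
import java.util.Arrays;

class EntrySlicing {
   public static void main(String[] args) {
      byte[] key = {1, 2};
      byte[] value = {3, 4, 5};
      byte[] metadata = {6};

      // one contiguous buffer, as a single disk read would return it
      byte[] data = new byte[key.length + value.length + metadata.length];
      System.arraycopy(key, 0, data, 0, key.length);
      System.arraycopy(value, 0, data, key.length, value.length);
      System.arraycopy(metadata, 0, data, key.length + value.length, metadata.length);

      // slice it back out using the recorded lengths (keyLen, dataLen, metadataLen above)
      byte[] keyPart = Arrays.copyOfRange(data, 0, key.length);
      byte[] valuePart = Arrays.copyOfRange(data, key.length, key.length + value.length);
      byte[] metadataPart = Arrays.copyOfRange(data, key.length + value.length, data.length);
      System.out.println(Arrays.toString(keyPart) + " " + Arrays.toString(valuePart) + " " + Arrays.toString(metadataPart));
   }
}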
@Override public boolean containsKey(Object k) { InternalCacheEntry<K, V> ice = peek(k); if (ice != null && ice.canExpire() && ice.isExpired(timeService.wallClockTime())) { entries.remove(k); ice = null; } return ice != null; }
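// Illustrative sketch (standalone, plain JDK): containsKey() above treats an expired entry as
// absent and drops it eagerly on access. A minimal model of the same read-time expiry check:
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ExpiringMap {
   static final class Entry {
      final String value;
      final long expiresAtMillis;
      Entry(String value, long expiresAtMillis) { this.value = value; this.expiresAtMillis = expiresAtMillis; }
      boolean isExpired(long nowMillis) { return nowMillis >= expiresAtMillis; }
   }

   private final Map<String, Entry> entries = new ConcurrentHashMap<String, Entry>();

   void put(String key, String value, long lifespanMillis) {
      entries.put(key, new Entry(value, System.currentTimeMillis() + lifespanMillis));
   }

   boolean containsKey(String key) {
      Entry e = entries.get(key);
      if (e != null && e.isExpired(System.currentTimeMillis())) {
         entries.remove(key); // expired entries are removed on access, as above
         e = null;
      }
      return e != null;
   }
}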
@Override @ManagedOperation( description = "Resets statistics gathered by this component", displayName = "Reset statistics") public void resetStatistics() { if (isStatisticsEnabled()) { reset(); resetNanoseconds.set(ts.time()); } }
@Override public void write(MarshalledEntry entry) { EntityManager em = emf.createEntityManager(); Object entity = entry.getValue(); MetadataEntity metadata = configuration.storeMetadata() ? new MetadataEntity( entry.getKeyBytes(), entry.getMetadataBytes(), entry.getMetadata() == null ? Long.MAX_VALUE : entry.getMetadata().expiryTime()) : null; try { if (!configuration.entityClass().isAssignableFrom(entity.getClass())) { throw new JpaStoreException( String.format( "This cache is configured with JPA CacheStore to only store values of type %s - cannot write %s = %s", configuration.entityClass().getName(), entity, entity.getClass().getName())); } else { EntityTransaction txn = em.getTransaction(); Object id = emf.getPersistenceUnitUtil().getIdentifier(entity); if (!entry.getKey().equals(id)) { throw new JpaStoreException( "Entity id value must equal to key of cache entry: " + "key = [" + entry.getKey() + "], id = [" + id + "]"); } long txnBegin = timeService.time(); try { if (trace) log.trace("Writing " + entity + "(" + toString(metadata) + ")"); txn.begin(); long entityMergeBegin = timeService.time(); em.merge(entity); stats.addEntityMerge(timeService.time() - entityMergeBegin); if (metadata != null && metadata.hasBytes()) { long metadataMergeBegin = timeService.time(); em.merge(metadata); stats.addMetadataMerge(timeService.time() - metadataMergeBegin); } txn.commit(); stats.addWriteTxCommited(timeService.time() - txnBegin); } catch (Exception e) { stats.addWriteTxFailed(timeService.time() - txnBegin); throw new JpaStoreException("Exception caught in write()", e); } finally { if (txn != null && txn.isActive()) txn.rollback(); } } } finally { em.close(); } }
@Override public CompletableFuture<Void> visitPrepareCommand( TxInvocationContext ctx, PrepareCommand command) throws Throwable { GlobalTransaction globalTransaction = command.getGlobalTransaction(); if (trace) { log.tracef( "Visit Prepare command %s. Is it local? %s. Transaction is %s", command, ctx.isOriginLocal(), globalTransaction.globalId()); } initStatsIfNecessary(ctx); cacheStatisticManager.onPrepareCommand(globalTransaction, ctx.isOriginLocal()); if (command.hasModifications()) { cacheStatisticManager.markAsWriteTransaction(globalTransaction, ctx.isOriginLocal()); } long start = timeService.time(); return ctx.onReturn( (rCtx, rCommand, rv, throwable) -> { if (throwable != null) { processWriteException(rCtx, globalTransaction, throwable); } else { long end = timeService.time(); updateTime( PREPARE_EXECUTION_TIME, NUM_PREPARE_COMMAND, start, end, globalTransaction, rCtx.isOriginLocal()); } if (((PrepareCommand) rCommand).isOnePhaseCommit()) { boolean local = rCtx.isOriginLocal(); boolean success = throwable == null; cacheStatisticManager.setTransactionOutcome( success, globalTransaction, rCtx.isOriginLocal()); cacheStatisticManager.terminateTransaction(globalTransaction, local, !local); } return null; }); }
@Override public CompletableFuture<Void> visitGetKeyValueCommand( InvocationContext ctx, GetKeyValueCommand command) throws Throwable { if (trace) { log.tracef( "Visit Get Key Value command %s. Is it in transaction scope? %s. Is it local? %s", command, ctx.isInTxScope(), ctx.isOriginLocal()); } if (!ctx.isInTxScope()) { return ctx.continueInvocation(); } long start = timeService.time(); return ctx.onReturn( (rCtx, rCommand, rv, throwable) -> { if (throwable != null) { throw throwable; } long end = timeService.time(); initStatsIfNecessary(rCtx); Object key = ((GetKeyValueCommand) rCommand).getKey(); if (isRemote(key)) { cacheStatisticManager.increment( NUM_REMOTE_GET, getGlobalTransaction(rCtx), rCtx.isOriginLocal()); cacheStatisticManager.add( REMOTE_GET_EXECUTION, timeService.timeDuration(start, end, NANOSECONDS), getGlobalTransaction(rCtx), rCtx.isOriginLocal()); } cacheStatisticManager.add( ALL_GET_EXECUTION, timeService.timeDuration(start, end, NANOSECONDS), getGlobalTransaction(rCtx), rCtx.isOriginLocal()); cacheStatisticManager.increment( NUM_GET, getGlobalTransaction(rCtx), rCtx.isOriginLocal()); return null; }); }
private synchronized void fetchClusterWideStatsIfNeeded() { if (launchNewDistTask()) { try { List<CompletableFuture<Map<String, Number>>> responseList = des.submitEverywhere(new DistributedCacheStatsCallable()); updateFieldsFromResponseMap(responseList); } catch (Exception e) { log.warn("Could not execute cluster wide cache stats operation ", e); } finally { statsUpdateTimestamp = ts.time(); } } }
@Override @ManagedAttribute( description = "Number of seconds since the cluster-wide cache statistics were last reset", displayName = "Seconds since cluster-wide cache statistics were reset", units = Units.SECONDS, displayType = DisplayType.SUMMARY) public long getTimeSinceReset() { long result = -1; if (isStatisticsEnabled()) { result = ts.timeDuration(resetNanoseconds.get(), TimeUnit.SECONDS); } return result; }
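// Illustrative sketch (standalone): resetStatistics(), setStatisticsEnabled() and
// getTimeSinceReset() above cooperate through a single nano-timestamp. The same pairing with
// plain JDK types:
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

class ResetClock {
   private final AtomicLong resetNanos = new AtomicLong(System.nanoTime());

   void reset() {
      resetNanos.set(System.nanoTime()); // record the reset instant on a monotonic clock
   }

   long secondsSinceReset() {
      return TimeUnit.NANOSECONDS.toSeconds(System.nanoTime() - resetNanos.get());
   }
}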
/** Removes free entries towards the end of the file and truncates the file. */ private void truncateFile(List<FileEntry> entries) { long startTime = 0; if (trace) startTime = timeService.wallClockTime(); int reclaimedSpace = 0; int removedEntries = 0; long truncateOffset = -1; for (Iterator<FileEntry> it = entries.iterator(); it.hasNext(); ) { FileEntry fe = it.next(); // Till we have free entries at the end of the file, // we can remove them and contract the file to release disk // space. if (!fe.isLocked() && ((fe.offset + fe.size) == filePos)) { truncateOffset = fe.offset; filePos = fe.offset; freeList.remove(fe); it.remove(); reclaimedSpace += fe.size; removedEntries++; } else { break; } } if (truncateOffset > 0) { try { channel.truncate(truncateOffset); } catch (IOException e) { throw new PersistenceException("Error while truncating file", e); } } if (trace) { log.tracef("Removed entries: " + removedEntries + ", Reclaimed Space: " + reclaimedSpace); log.tracef( "Time taken for truncateFile: " + (timeService.wallClockTime() - startTime) + " (ms)"); } }
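// Illustrative sketch (standalone; assumes, as the loop above does, that the free entries are
// visited from the end of the file backwards): every free region that touches the current end
// lets the file shrink, and the first gap stops the truncation:
import java.util.Arrays;
import java.util.List;

class TailTruncation {
   static final class Region {
      final long offset;
      final long size;
      Region(long offset, long size) { this.offset = offset; this.size = size; }
   }

   // Returns the new file length after dropping free regions that sit at the very end.
   static long newLength(List<Region> freeByDescendingOffset, long fileLength) {
      long end = fileLength;
      for (Region r : freeByDescendingOffset) {
         if (r.offset + r.size == end) {
            end = r.offset; // the region touches the current end, so the file can shrink past it
         } else {
            break; // first gap from the end: nothing beyond this point can be reclaimed
         }
      }
      return end;
   }

   public static void main(String[] args) {
      List<Region> free = Arrays.asList(new Region(90, 10), new Region(80, 10), new Region(20, 5));
      System.out.println(newLength(free, 100)); // prints 80
   }
}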
private void updateTime( ExtendedStatistic duration, ExtendedStatistic counter, long initTime, long endTime, GlobalTransaction globalTransaction, boolean local) { cacheStatisticManager.add( duration, timeService.timeDuration(initTime, endTime, NANOSECONDS), globalTransaction, local); cacheStatisticManager.increment(counter, globalTransaction, local); }
@Override public InternalCacheEntry<K, V> get(Object k) { InternalCacheEntry<K, V> e = entries.get(k); if (e != null && e.canExpire()) { long currentTimeMillis = timeService.wallClockTime(); if (e.isExpired(currentTimeMillis)) { expirationManager.handleInMemoryExpiration(e, currentTimeMillis); e = null; } else { e.touch(currentTimeMillis); } } return e; }
private CompletableFuture<Void> visitWriteCommand( InvocationContext ctx, WriteCommand command, Object key) throws Throwable { if (trace) { log.tracef( "Visit write command %s. Is it in transaction scope? %s. Is it local? %s", command, ctx.isInTxScope(), ctx.isOriginLocal()); } if (!ctx.isInTxScope()) { return ctx.continueInvocation(); } long start = timeService.time(); return ctx.onReturn( (rCtx, rCommand, rv, throwable) -> { long end = timeService.time(); initStatsIfNecessary(rCtx); if (throwable != null) { processWriteException(rCtx, getGlobalTransaction(rCtx), throwable); } else { if (isRemote(key)) { cacheStatisticManager.add( REMOTE_PUT_EXECUTION, timeService.timeDuration(start, end, NANOSECONDS), getGlobalTransaction(rCtx), rCtx.isOriginLocal()); cacheStatisticManager.increment( NUM_REMOTE_PUT, getGlobalTransaction(rCtx), rCtx.isOriginLocal()); } } cacheStatisticManager.increment( NUM_PUT, getGlobalTransaction(rCtx), rCtx.isOriginLocal()); cacheStatisticManager.markAsWriteTransaction( getGlobalTransaction(rCtx), rCtx.isOriginLocal()); return null; }); }
@Override public boolean contains(Object key) { if (!isValidKeyType(key)) { return false; } EntityManager em = emf.createEntityManager(); try { EntityTransaction txn = em.getTransaction(); long txnBegin = timeService.time(); txn.begin(); try { long entityFindBegin = timeService.time(); Object entity = em.find(configuration.entityClass(), key); stats.addEntityFind(timeService.time() - entityFindBegin); if (trace) log.trace("Entity " + key + " -> " + entity); try { if (entity == null) return false; if (configuration.storeMetadata()) { byte[] keyBytes; try { keyBytes = marshaller.objectToByteBuffer(key); } catch (Exception e) { throw new JpaStoreException("Cannot marshall key", e); } long metadataFindBegin = timeService.time(); MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes); stats.addMetadataFind(timeService.time() - metadataFindBegin); if (trace) log.trace("Metadata " + key + " -> " + toString(metadata)); return metadata == null || metadata.expiration > timeService.wallClockTime(); } else { return true; } } finally { txn.commit(); stats.addReadTxCommitted(timeService.time() - txnBegin); } } catch (RuntimeException e) { stats.addReadTxFailed(timeService.time() - txnBegin); throw e; } finally { if (txn != null && txn.isActive()) txn.rollback(); } } finally { em.close(); } }
private void shutDownGracefully() { if (log.isDebugEnabled()) log.debugf( "Wait for on-going transactions to finish for %s.", Util.prettyPrintTime( configuration.transaction().cacheStopTimeout(), TimeUnit.MILLISECONDS)); long failTime = timeService.expectedEndTime( configuration.transaction().cacheStopTimeout(), TimeUnit.MILLISECONDS); boolean txsOnGoing = areTxsOnGoing(); while (txsOnGoing && !timeService.isTimeExpired(failTime)) { try { Thread.sleep(30); txsOnGoing = areTxsOnGoing(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); if (clustered) { log.debugf( "Interrupted waiting for on-going transactions to finish. %s local transactions and %s remote transactions", localTransactions.size(), remoteTransactions.size()); } else { log.debugf( "Interrupted waiting for %s on-going transactions to finish.", localTransactions.size()); } } } if (txsOnGoing) { log.unfinishedTransactionsRemain( localTransactions == null ? 0 : localTransactions.size(), remoteTransactions == null ? 0 : remoteTransactions.size()); } else { log.debug("All transactions terminated"); } }
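// Illustrative sketch (standalone, simplified): shutDownGracefully() above polls for in-flight
// transactions until a deadline computed up front. This version stops waiting on interrupt after
// restoring the interrupt flag, which is a slight simplification of the code above:
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

class GracefulStop {
   static void awaitQuiescence(AtomicInteger inFlight, long stopTimeoutMillis) {
      long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(stopTimeoutMillis);
      while (inFlight.get() > 0 && System.nanoTime() - deadline < 0) {
         try {
            Thread.sleep(30); // short poll interval, mirroring the loop above
         } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the flag and give up waiting early
            break;
         }
      }
      if (inFlight.get() > 0) {
         System.out.println("Stopping with " + inFlight.get() + " transactions still in flight");
      }
   }
}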
@Override public InternalCacheEntry<K, V> remove(Object k) { final InternalCacheEntry<K, V>[] reference = new InternalCacheEntry[1]; entries.compute( (K) k, (key, entry) -> { activator.onRemove(key, entry == null); reference[0] = entry; return null; }); InternalCacheEntry<K, V> e = reference[0]; if (trace) { log.tracef("Removed %s from container", e); } return e == null || (e.canExpire() && e.isExpired(timeService.wallClockTime())) ? null : e; }
@Override public void purge(Executor threadPool, final PurgeListener task) { long now = timeService.wallClockTime(); List<KeyValuePair<Object, FileEntry>> entriesToPurge = new ArrayList<KeyValuePair<Object, FileEntry>>(); synchronized (entries) { for (Iterator<Map.Entry<K, FileEntry>> it = entries.entrySet().iterator(); it.hasNext(); ) { Map.Entry<K, FileEntry> next = it.next(); FileEntry fe = next.getValue(); if (fe.isExpired(now)) { it.remove(); entriesToPurge.add(new KeyValuePair<Object, FileEntry>(next.getKey(), fe)); } } } resizeLock.readLock().lock(); try { for (Iterator<KeyValuePair<Object, FileEntry>> it = entriesToPurge.iterator(); it.hasNext(); ) { KeyValuePair<Object, FileEntry> next = it.next(); FileEntry fe = next.getValue(); if (fe.isExpired(now)) { it.remove(); try { free(fe); } catch (Exception e) { throw new PersistenceException(e); } if (task != null) task.entryPurged(next.getKey()); } } // Disk space optimizations synchronized (freeList) { processFreeEntries(); } } finally { resizeLock.readLock().unlock(); } }
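// Illustrative sketch (standalone, simplified): purge() above works in two phases -- cheap
// removal of expired entries while holding the map lock, then the slower per-entry release work
// outside of it. The same shape with plain collections:
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

class TwoPhasePurge {
   static List<String> purge(Map<String, Long> expiryByKey, long nowMillis) {
      List<String> purged = new ArrayList<String>();
      synchronized (expiryByKey) { // phase 1: drop expired entries under the lock
         Iterator<Map.Entry<String, Long>> it = expiryByKey.entrySet().iterator();
         while (it.hasNext()) {
            Map.Entry<String, Long> e = it.next();
            if (e.getValue() <= nowMillis) {
               it.remove();
               purged.add(e.getKey());
            }
         }
      }
      for (String key : purged) { // phase 2: slower per-entry work outside the lock
         System.out.println("released storage for " + key);
      }
      return purged;
   }

   public static void main(String[] args) {
      Map<String, Long> expiryByKey = new HashMap<String, Long>();
      expiryByKey.put("a", 0L);
      expiryByKey.put("b", Long.MAX_VALUE);
      System.out.println(purge(expiryByKey, System.currentTimeMillis())); // prints [a]
   }
}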
private boolean launchNewDistTask() { long duration = ts.timeDuration(statsUpdateTimestamp, ts.time(), TimeUnit.MILLISECONDS); return duration > staleStatsTreshold; }
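// Illustrative sketch (standalone): launchNewDistTask() above gates an expensive cluster-wide
// refresh behind a staleness threshold measured on a monotonic clock. The same guard with plain
// JDK types:
import java.util.concurrent.TimeUnit;

class StaleSnapshotGuard {
   private final long staleThresholdMillis;
   private volatile long lastRefreshNanos = System.nanoTime();

   StaleSnapshotGuard(long staleThresholdMillis) {
      this.staleThresholdMillis = staleThresholdMillis;
   }

   boolean needsRefresh() {
      long ageMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - lastRefreshNanos);
      return ageMillis > staleThresholdMillis;
   }

   void markRefreshed() {
      lastRefreshNanos = System.nanoTime(); // mirrors statsUpdateTimestamp = ts.time() above
   }
}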
protected <KIn, VIn, KOut, VOut> Set<KOut> combine( MapCombineCommand<KIn, VIn, KOut, VOut> mcc, CollectableCollector<KOut, VOut> collector) throws Exception { String taskId = mcc.getTaskId(); boolean emitCompositeIntermediateKeys = mcc.isEmitCompositeIntermediateKeys(); Reducer<KOut, VOut> combiner = mcc.getCombiner(); Set<KOut> mapPhaseKeys = new HashSet<KOut>(); Cache<Object, DeltaAwareList<VOut>> tmpCache = null; if (emitCompositeIntermediateKeys) { tmpCache = cacheManager.getCache(DEFAULT_TMP_CACHE_CONFIGURATION_NAME); } else { tmpCache = cacheManager.getCache(taskId); } if (tmpCache == null) { throw new IllegalStateException( "Temporary cache for MapReduceTask " + taskId + " not found on " + cdl.getAddress()); } DistributionManager dm = tmpCache.getAdvancedCache().getDistributionManager(); if (combiner != null) { Cache<?, ?> cache = cacheManager.getCache(mcc.getCacheName()); log.tracef("For m/r task %s invoking combiner %s at %s", taskId, mcc, cdl.getAddress()); MapReduceTaskLifecycleService taskLifecycleService = MapReduceTaskLifecycleService.getInstance(); Map<KOut, VOut> combinedMap = new ConcurrentHashMap<KOut, VOut>(); long start = log.isTraceEnabled() ? timeService.time() : 0; try { taskLifecycleService.onPreExecute(combiner, cache); Map<KOut, List<VOut>> collectedValues = collector.collectedValues(); for (Entry<KOut, List<VOut>> e : collectedValues.entrySet()) { List<VOut> list = e.getValue(); VOut combined; if (list.size() > 1) { combined = combiner.reduce(e.getKey(), list.iterator()); combinedMap.put(e.getKey(), combined); } else { combined = list.get(0); combinedMap.put(e.getKey(), combined); } log.tracef( "For m/r task %s combined %s to %s at %s", taskId, e.getKey(), combined, cdl.getAddress()); } } finally { if (log.isTraceEnabled()) { log.tracef( "Combine for task %s took %s milliseconds", mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS)); } taskLifecycleService.onPostExecute(combiner); } Map<Address, List<KOut>> keysToNodes = mapKeysToNodes(dm, taskId, combinedMap.keySet(), emitCompositeIntermediateKeys); start = log.isTraceEnabled() ? timeService.time() : 0; try { for (Entry<Address, List<KOut>> entry : keysToNodes.entrySet()) { List<KOut> keysHashedToAddress = entry.getValue(); try { log.tracef( "For m/r task %s migrating intermediate keys %s to %s", taskId, keysHashedToAddress, entry.getKey()); for (KOut key : keysHashedToAddress) { VOut value = combinedMap.get(key); DeltaAwareList<VOut> delta = new DeltaAwareList<VOut>(value); if (emitCompositeIntermediateKeys) { tmpCache.put(new IntermediateCompositeKey<KOut>(taskId, key), delta); } else { tmpCache.put(key, delta); } mapPhaseKeys.add(key); } } catch (Exception e) { throw new CacheException( "Could not move intermediate keys/values for M/R task " + taskId, e); } } } finally { if (log.isTraceEnabled()) { log.tracef( "Migrating keys for task %s took %s milliseconds (Migrated %s keys)", mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS), mapPhaseKeys.size()); } } } else { // Combiner not specified so lets insert each key/uncombined-List pair into tmp cache Map<KOut, List<VOut>> collectedValues = collector.collectedValues(); Map<Address, List<KOut>> keysToNodes = mapKeysToNodes(dm, taskId, collectedValues.keySet(), emitCompositeIntermediateKeys); long start = log.isTraceEnabled() ? 
timeService.time() : 0; try { for (Entry<Address, List<KOut>> entry : keysToNodes.entrySet()) { List<KOut> keysHashedToAddress = entry.getValue(); try { log.tracef( "For m/r task %s migrating intermediate keys %s to %s", taskId, keysHashedToAddress, entry.getKey()); for (KOut key : keysHashedToAddress) { List<VOut> value = collectedValues.get(key); DeltaAwareList<VOut> delta = new DeltaAwareList<VOut>(value); if (emitCompositeIntermediateKeys) { tmpCache.put(new IntermediateCompositeKey<KOut>(taskId, key), delta); } else { tmpCache.put(key, delta); } mapPhaseKeys.add(key); } } catch (Exception e) { throw new CacheException( "Could not move intermediate keys/values for M/R task " + taskId, e); } } } finally { if (log.isTraceEnabled()) { log.tracef( "Migrating keys for task %s took %s milliseconds (Migrated %s keys)", mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS), mapPhaseKeys.size()); } } } return mapPhaseKeys; }