private CompletableFuture<Void> visitSecondPhaseCommand(
    TxInvocationContext ctx,
    TransactionBoundaryCommand command,
    boolean commit,
    ExtendedStatistic duration,
    ExtendedStatistic counter)
    throws Throwable {
  GlobalTransaction globalTransaction = command.getGlobalTransaction();
  if (trace) {
    log.tracef(
        "Visit 2nd phase command %s. Is it local? %s. Transaction is %s",
        command, ctx.isOriginLocal(), globalTransaction.globalId());
  }
  long start = timeService.time();
  return ctx.onReturn(
      (rCtx, rCommand, rv, throwable) -> {
        if (throwable != null) {
          throw throwable;
        }
        long end = timeService.time();
        updateTime(duration, counter, start, end, globalTransaction, rCtx.isOriginLocal());
        cacheStatisticManager.setTransactionOutcome(
            commit, globalTransaction, rCtx.isOriginLocal());
        cacheStatisticManager.terminateTransaction(globalTransaction, true, true);
        return null;
      });
}
public void cleanupTimedOutTransactions() {
  if (trace)
    log.tracef(
        "About to cleanup remote transactions older than %d ms",
        configuration.transaction().completedTxTimeout());
  long beginning = timeService.time();
  long cutoffCreationTime =
      beginning - TimeUnit.MILLISECONDS.toNanos(configuration.transaction().completedTxTimeout());
  List<GlobalTransaction> toKill = new ArrayList<>();

  // Check remote transactions.
  for (Map.Entry<GlobalTransaction, RemoteTransaction> e : remoteTransactions.entrySet()) {
    GlobalTransaction gtx = e.getKey();
    RemoteTransaction remoteTx = e.getValue();
    if (remoteTx != null) {
      if (trace) log.tracef("Checking transaction %s", gtx);
      // Check the time; the subtraction keeps the nano-time comparison overflow-safe.
      if (remoteTx.getCreationTime() - cutoffCreationTime < 0) {
        long duration =
            timeService.timeDuration(remoteTx.getCreationTime(), beginning, TimeUnit.MILLISECONDS);
        log.remoteTransactionTimeout(gtx, duration);
        toKill.add(gtx);
      }
    }
  }

  // Rollback the orphaned transactions and release any held locks.
  for (GlobalTransaction gtx : toKill) {
    killTransaction(gtx);
  }
}
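// Why the cutoff check above uses "creationTime - cutoffCreationTime < 0" rather than
// "creationTime < cutoffCreationTime": System.nanoTime()-based readings may wrap, so only
// differences between readings are meaningful. A self-contained demonstration (not part of
// the original code) of the two comparisons near the wrap point:
public final class NanoTimeComparisonDemo {
  public static void main(String[] args) {
    long cutoff = Long.MAX_VALUE - 50; // a nano-time reading just before the wrap point
    long created = cutoff + 100;       // a reading taken 100ns later, wrapped past MAX_VALUE
    System.out.println(created < cutoff);     // true: the direct comparison gives the wrong order
    System.out.println(created - cutoff < 0); // false: the subtraction stays correct (yields 100)
  }
}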
public void cleanupCompletedTransactions() {
  if (!completedTransactions.isEmpty()) {
    try {
      log.tracef(
          "About to cleanup completed transactions. Initial size is %d",
          completedTransactions.size());
      // This iterator is weakly consistent and will never throw ConcurrentModificationException.
      Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
          completedTransactions.entrySet().iterator();
      long timeout = configuration.transaction().completedTxTimeout();
      int removedEntries = 0;
      long beginning = timeService.time();
      while (iterator.hasNext()) {
        Map.Entry<GlobalTransaction, Long> e = iterator.next();
        long ageMillis = timeService.timeDuration(e.getValue(), TimeUnit.MILLISECONDS);
        if (ageMillis >= timeout) {
          iterator.remove();
          removedEntries++;
        }
      }
      long duration = timeService.timeDuration(beginning, TimeUnit.MILLISECONDS);
      log.tracef(
          "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, "
              + "current number of completed transactions is %d",
          removedEntries, duration, completedTransactions.size());
    } catch (Exception e) {
      log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
    }
  }
}
@Override
public void setStatisticsEnabled(boolean enabled) {
  this.statisticsEnabled = enabled;
  if (enabled) {
    // Technically we do not reset the stats here; we (re)initialize their time base.
    resetNanoseconds.set(ts.time());
  }
}
@Override
public CompletableFuture<Void> visitGetAllCommand(InvocationContext ctx, GetAllCommand command)
    throws Throwable {
  if (trace) {
    log.tracef(
        "Visit Get All Command %s. Is it in transaction scope? %s. Is it local? %s",
        command, ctx.isInTxScope(), ctx.isOriginLocal());
  }
  if (!ctx.isInTxScope()) {
    return ctx.continueInvocation();
  }
  long start = timeService.time();
  return ctx.onReturn(
      (rCtx, rCommand, rv, throwable) -> {
        if (throwable != null) {
          throw throwable;
        }
        long end = timeService.time();
        initStatsIfNecessary(rCtx);
        int numRemote = 0;
        Collection<?> keys = ((GetAllCommand) rCommand).getKeys();
        for (Object key : keys) {
          if (isRemote(key)) numRemote++;
        }
        // TODO: this is imprecise for statistics, as each statistic records the duration of
        // the entire multi-get rather than of a single key. Consider recording an average
        // per key instead; either way the current value is not very indicative.
        if (numRemote > 0) {
          cacheStatisticManager.add(
              NUM_REMOTE_GET, numRemote, getGlobalTransaction(rCtx), rCtx.isOriginLocal());
          cacheStatisticManager.add(
              REMOTE_GET_EXECUTION,
              timeService.timeDuration(start, end, NANOSECONDS),
              getGlobalTransaction(rCtx),
              rCtx.isOriginLocal());
        }
        cacheStatisticManager.add(
            ALL_GET_EXECUTION,
            timeService.timeDuration(start, end, NANOSECONDS),
            getGlobalTransaction(rCtx),
            rCtx.isOriginLocal());
        cacheStatisticManager.add(
            NUM_GET, keys.size(), getGlobalTransaction(rCtx), rCtx.isOriginLocal());
        return null;
      });
}
@Override
public <KOut, VOut> Map<KOut, VOut> reduce(ReduceCommand<KOut, VOut> reduceCommand)
    throws InterruptedException {
  Cache<?, ?> cache = cacheManager.getCache(reduceCommand.getCacheName());
  Set<KOut> keys = reduceCommand.getKeys();
  String taskId = reduceCommand.getTaskId();
  Reducer<KOut, VOut> reducer = reduceCommand.getReducer();
  boolean useIntermediateKeys = reduceCommand.isEmitCompositeIntermediateKeys();
  boolean noInputKeys = keys == null || keys.isEmpty();
  Cache<Object, List<VOut>> tmpCache = cacheManager.getCache(reduceCommand.getCacheName());
  Map<KOut, VOut> result = new HashMap<KOut, VOut>();
  if (noInputKeys) {
    // Illegal state, raise an exception.
    throw new IllegalStateException(
        "Reduce phase of MapReduceTask "
            + taskId
            + " on node "
            + cdl.getAddress()
            + " executed with empty input keys");
  } else {
    // First hook into the lifecycle.
    MapReduceTaskLifecycleService taskLifecycleService =
        MapReduceTaskLifecycleService.getInstance();
    log.tracef("For m/r task %s invoking %s at %s", taskId, reduceCommand, cdl.getAddress());
    int interruptCount = 0;
    long start = log.isTraceEnabled() ? timeService.time() : 0;
    try {
      taskLifecycleService.onPreExecute(reducer, cache);
      for (KOut key : keys) {
        if (checkInterrupt(interruptCount++) && Thread.currentThread().isInterrupted())
          throw new InterruptedException();
        // Load the result value from the map phase...
        List<VOut> value;
        if (useIntermediateKeys) {
          value = tmpCache.get(new IntermediateCompositeKey<KOut>(taskId, key));
        } else {
          value = tmpCache.get(key);
        }
        // ... and reduce it.
        VOut reduced = reducer.reduce(key, value.iterator());
        result.put(key, reduced);
        log.tracef(
            "For m/r task %s reduced %s to %s at %s", taskId, key, reduced, cdl.getAddress());
      }
    } finally {
      if (log.isTraceEnabled()) {
        log.tracef(
            "Reduce for task %s took %s milliseconds",
            reduceCommand.getTaskId(),
            timeService.timeDuration(start, TimeUnit.MILLISECONDS));
      }
      taskLifecycleService.onPostExecute(reducer);
    }
  }
  return result;
}
@Override
@ManagedOperation(
    description = "Resets statistics gathered by this component",
    displayName = "Reset statistics")
public void resetStatistics() {
  if (isStatisticsEnabled()) {
    reset();
    resetNanoseconds.set(ts.time());
  }
}
@Override
public MarshalledEntry load(Object key) {
  if (!isValidKeyType(key)) {
    return null;
  }
  EntityManager em = emf.createEntityManager();
  try {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityFindBegin = timeService.time();
      Object entity = em.find(configuration.entityClass(), key);
      stats.addEntityFind(timeService.time() - entityFindBegin);
      try {
        if (entity == null) return null;
        InternalMetadata m = null;
        if (configuration.storeMetadata()) {
          byte[] keyBytes;
          try {
            keyBytes = marshaller.objectToByteBuffer(key);
          } catch (Exception e) {
            throw new JpaStoreException("Failed to marshall key", e);
          }
          long metadataFindBegin = timeService.time();
          MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes);
          stats.addMetadataFind(timeService.time() - metadataFindBegin);
          if (metadata != null && metadata.getMetadata() != null) {
            try {
              m = (InternalMetadata) marshaller.objectFromByteBuffer(metadata.getMetadata());
            } catch (Exception e) {
              throw new JpaStoreException("Failed to unmarshall metadata", e);
            }
            if (m.isExpired(timeService.wallClockTime())) {
              return null;
            }
          }
        }
        if (trace) log.trace("Loaded " + entity + " (" + m + ")");
        return marshallerEntryFactory.newMarshalledEntry(key, entity, m);
      } finally {
        try {
          txn.commit();
          stats.addReadTxCommitted(timeService.time() - txnBegin);
        } catch (Exception e) {
          stats.addReadTxFailed(timeService.time() - txnBegin);
          throw new JpaStoreException("Failed to load entry", e);
        }
      }
    } finally {
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
@Override
public void write(MarshalledEntry entry) {
  EntityManager em = emf.createEntityManager();
  Object entity = entry.getValue();
  MetadataEntity metadata =
      configuration.storeMetadata()
          ? new MetadataEntity(
              entry.getKeyBytes(),
              entry.getMetadataBytes(),
              entry.getMetadata() == null ? Long.MAX_VALUE : entry.getMetadata().expiryTime())
          : null;
  try {
    if (!configuration.entityClass().isAssignableFrom(entity.getClass())) {
      throw new JpaStoreException(
          String.format(
              "This cache is configured with JPA CacheStore to only store values of type %s - cannot write %s of type %s",
              configuration.entityClass().getName(), entity, entity.getClass().getName()));
    } else {
      EntityTransaction txn = em.getTransaction();
      Object id = emf.getPersistenceUnitUtil().getIdentifier(entity);
      if (!entry.getKey().equals(id)) {
        throw new JpaStoreException(
            "Entity id value must equal the key of the cache entry: "
                + "key = ["
                + entry.getKey()
                + "], id = ["
                + id
                + "]");
      }
      long txnBegin = timeService.time();
      try {
        if (trace) log.trace("Writing " + entity + " (" + toString(metadata) + ")");
        txn.begin();
        long entityMergeBegin = timeService.time();
        em.merge(entity);
        stats.addEntityMerge(timeService.time() - entityMergeBegin);
        if (metadata != null && metadata.hasBytes()) {
          long metadataMergeBegin = timeService.time();
          em.merge(metadata);
          stats.addMetadataMerge(timeService.time() - metadataMergeBegin);
        }
        txn.commit();
        stats.addWriteTxCommited(timeService.time() - txnBegin);
      } catch (Exception e) {
        stats.addWriteTxFailed(timeService.time() - txnBegin);
        throw new JpaStoreException("Exception caught in write()", e);
      } finally {
        if (txn != null && txn.isActive()) txn.rollback();
      }
    }
  } finally {
    em.close();
  }
}
@Override
public CompletableFuture<Void> visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command)
    throws Throwable {
  GlobalTransaction globalTransaction = command.getGlobalTransaction();
  if (trace) {
    log.tracef(
        "Visit Prepare command %s. Is it local? %s. Transaction is %s",
        command, ctx.isOriginLocal(), globalTransaction.globalId());
  }
  initStatsIfNecessary(ctx);
  cacheStatisticManager.onPrepareCommand(globalTransaction, ctx.isOriginLocal());
  if (command.hasModifications()) {
    cacheStatisticManager.markAsWriteTransaction(globalTransaction, ctx.isOriginLocal());
  }
  long start = timeService.time();
  return ctx.onReturn(
      (rCtx, rCommand, rv, throwable) -> {
        if (throwable != null) {
          processWriteException(rCtx, globalTransaction, throwable);
        } else {
          long end = timeService.time();
          updateTime(
              PREPARE_EXECUTION_TIME,
              NUM_PREPARE_COMMAND,
              start,
              end,
              globalTransaction,
              rCtx.isOriginLocal());
        }
        if (((PrepareCommand) rCommand).isOnePhaseCommit()) {
          boolean local = rCtx.isOriginLocal();
          boolean success = throwable == null;
          cacheStatisticManager.setTransactionOutcome(success, globalTransaction, local);
          cacheStatisticManager.terminateTransaction(globalTransaction, local, !local);
        }
        return null;
      });
}
@Override
public CompletableFuture<Void> visitGetKeyValueCommand(
    InvocationContext ctx, GetKeyValueCommand command) throws Throwable {
  if (trace) {
    log.tracef(
        "Visit Get Key Value command %s. Is it in transaction scope? %s. Is it local? %s",
        command, ctx.isInTxScope(), ctx.isOriginLocal());
  }
  if (!ctx.isInTxScope()) {
    return ctx.continueInvocation();
  }
  long start = timeService.time();
  return ctx.onReturn(
      (rCtx, rCommand, rv, throwable) -> {
        if (throwable != null) {
          throw throwable;
        }
        long end = timeService.time();
        initStatsIfNecessary(rCtx);
        Object key = ((GetKeyValueCommand) rCommand).getKey();
        if (isRemote(key)) {
          cacheStatisticManager.increment(
              NUM_REMOTE_GET, getGlobalTransaction(rCtx), rCtx.isOriginLocal());
          cacheStatisticManager.add(
              REMOTE_GET_EXECUTION,
              timeService.timeDuration(start, end, NANOSECONDS),
              getGlobalTransaction(rCtx),
              rCtx.isOriginLocal());
        }
        cacheStatisticManager.add(
            ALL_GET_EXECUTION,
            timeService.timeDuration(start, end, NANOSECONDS),
            getGlobalTransaction(rCtx),
            rCtx.isOriginLocal());
        cacheStatisticManager.increment(NUM_GET, getGlobalTransaction(rCtx), rCtx.isOriginLocal());
        return null;
      });
}
private synchronized void fetchClusterWideStatsIfNeeded() {
  if (launchNewDistTask()) {
    try {
      List<CompletableFuture<Map<String, Number>>> responseList =
          des.submitEverywhere(new DistributedCacheStatsCallable());
      updateFieldsFromResponseMap(responseList);
    } catch (Exception e) {
      log.warn("Could not execute cluster wide cache stats operation", e);
    } finally {
      statsUpdateTimestamp = ts.time();
    }
  }
}
private CompletableFuture<Void> visitWriteCommand(
    InvocationContext ctx, WriteCommand command, Object key) throws Throwable {
  if (trace) {
    log.tracef(
        "Visit write command %s. Is it in transaction scope? %s. Is it local? %s",
        command, ctx.isInTxScope(), ctx.isOriginLocal());
  }
  if (!ctx.isInTxScope()) {
    return ctx.continueInvocation();
  }
  long start = timeService.time();
  return ctx.onReturn(
      (rCtx, rCommand, rv, throwable) -> {
        long end = timeService.time();
        initStatsIfNecessary(rCtx);
        if (throwable != null) {
          processWriteException(rCtx, getGlobalTransaction(rCtx), throwable);
        } else {
          if (isRemote(key)) {
            cacheStatisticManager.add(
                REMOTE_PUT_EXECUTION,
                timeService.timeDuration(start, end, NANOSECONDS),
                getGlobalTransaction(rCtx),
                rCtx.isOriginLocal());
            cacheStatisticManager.increment(
                NUM_REMOTE_PUT, getGlobalTransaction(rCtx), rCtx.isOriginLocal());
          }
        }
        // Note: the put is counted and the transaction marked as a write even when the
        // command failed; only the remote-put statistics are skipped on failure.
        cacheStatisticManager.increment(NUM_PUT, getGlobalTransaction(rCtx), rCtx.isOriginLocal());
        cacheStatisticManager.markAsWriteTransaction(
            getGlobalTransaction(rCtx), rCtx.isOriginLocal());
        return null;
      });
}
private <KIn, VIn, KOut, VOut> Map<KOut, List<VOut>> combineForLocalReduction(
    MapCombineCommand<KIn, VIn, KOut, VOut> mcc, CollectableCollector<KOut, VOut> collector) {
  String taskId = mcc.getTaskId();
  Reducer<KOut, VOut> combiner = mcc.getCombiner();
  Map<KOut, List<VOut>> result = null;

  if (combiner != null) {
    result = new HashMap<KOut, List<VOut>>();
    log.tracef("For m/r task %s invoking combiner %s at %s", taskId, mcc, cdl.getAddress());
    MapReduceTaskLifecycleService taskLifecycleService =
        MapReduceTaskLifecycleService.getInstance();
    long start = log.isTraceEnabled() ? timeService.time() : 0;
    try {
      Cache<?, ?> cache = cacheManager.getCache(mcc.getCacheName());
      taskLifecycleService.onPreExecute(combiner, cache);
      Map<KOut, List<VOut>> collectedValues = collector.collectedValues();
      for (Entry<KOut, List<VOut>> e : collectedValues.entrySet()) {
        VOut combined;
        List<VOut> list = e.getValue();
        List<VOut> l = new LinkedList<VOut>();
        if (list.size() > 1) {
          combined = combiner.reduce(e.getKey(), list.iterator());
        } else {
          combined = list.get(0);
        }
        l.add(combined);
        result.put(e.getKey(), l);
        log.tracef(
            "For m/r task %s combined %s to %s at %s",
            taskId, e.getKey(), combined, cdl.getAddress());
      }
    } finally {
      if (log.isTraceEnabled()) {
        log.tracef(
            "Combine for task %s took %s milliseconds",
            mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS));
      }
      taskLifecycleService.onPostExecute(combiner);
    }
  } else {
    // Combiner not specified.
    result = collector.collectedValues();
  }
  return result;
}
@Override
public boolean contains(Object key) {
  if (!isValidKeyType(key)) {
    return false;
  }
  EntityManager em = emf.createEntityManager();
  try {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityFindBegin = timeService.time();
      Object entity = em.find(configuration.entityClass(), key);
      stats.addEntityFind(timeService.time() - entityFindBegin);
      if (trace) log.trace("Entity " + key + " -> " + entity);
      try {
        if (entity == null) return false;
        if (configuration.storeMetadata()) {
          byte[] keyBytes;
          try {
            keyBytes = marshaller.objectToByteBuffer(key);
          } catch (Exception e) {
            throw new JpaStoreException("Cannot marshall key", e);
          }
          long metadataFindBegin = timeService.time();
          MetadataEntity metadata = em.find(MetadataEntity.class, keyBytes);
          stats.addMetadataFind(timeService.time() - metadataFindBegin);
          if (trace) log.trace("Metadata " + key + " -> " + toString(metadata));
          return metadata == null || metadata.expiration > timeService.wallClockTime();
        } else {
          return true;
        }
      } finally {
        txn.commit();
        stats.addReadTxCommitted(timeService.time() - txnBegin);
      }
    } catch (RuntimeException e) {
      stats.addReadTxFailed(timeService.time() - txnBegin);
      throw e;
    } finally {
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
protected <KIn, VIn, KOut, VOut> CollectableCollector<KOut, VOut> map(
    MapCombineCommand<KIn, VIn, KOut, VOut> mcc) throws InterruptedException {
  Cache<KIn, VIn> cache = cacheManager.getCache(mcc.getCacheName());
  Set<KIn> keys = mcc.getKeys();
  Set<KIn> inputKeysCopy = null;
  Mapper<KIn, VIn, KOut, VOut> mapper = mcc.getMapper();
  DistributionManager dm = cache.getAdvancedCache().getDistributionManager();
  boolean inputKeysSpecified = keys != null && !keys.isEmpty();
  Set<KIn> inputKeys = keys;
  if (!inputKeysSpecified) {
    inputKeys = filterLocalPrimaryOwner(cache.keySet(), dm);
  } else {
    inputKeysCopy = new HashSet<KIn>(keys);
  }
  // Hook the map function into the lifecycle and execute it.
  MapReduceTaskLifecycleService taskLifecycleService =
      MapReduceTaskLifecycleService.getInstance();
  DefaultCollector<KOut, VOut> collector = new DefaultCollector<KOut, VOut>();
  log.tracef("For m/r task %s invoking %s with input keys %s", mcc.getTaskId(), mcc, inputKeys);
  int interruptCount = 0;
  long start = log.isTraceEnabled() ? timeService.time() : 0;
  try {
    taskLifecycleService.onPreExecute(mapper, cache);
    for (KIn key : inputKeys) {
      if (checkInterrupt(interruptCount++) && Thread.currentThread().isInterrupted())
        throw new InterruptedException();
      VIn value = cache.get(key);
      mapper.map(key, value, collector);
      if (inputKeysSpecified) {
        inputKeysCopy.remove(key);
      }
    }
    Set<KIn> keysFromCacheLoader = null;
    if (inputKeysSpecified) {
      // Load only the remaining specified input keys - iff they are in the cache loader
      // and pinned to this primary owner.
      keysFromCacheLoader = filterLocalPrimaryOwner(inputKeysCopy, dm);
    } else {
      // Load everything from the cache loader pinned to this primary owner.
      keysFromCacheLoader =
          filterLocalPrimaryOwner(loadAllKeysFromCacheLoaderUsingFilter(inputKeys), dm);
    }
    log.tracef(
        "For m/r task %s cache loader input keys %s", mcc.getTaskId(), keysFromCacheLoader);
    interruptCount = 0;
    for (KIn key : keysFromCacheLoader) {
      if (checkInterrupt(interruptCount++) && Thread.currentThread().isInterrupted())
        throw new InterruptedException();
      VIn value = loadValueFromCacheLoader(key);
      if (value != null) {
        mapper.map(key, value, collector);
      }
    }
  } finally {
    if (log.isTraceEnabled()) {
      log.tracef(
          "Map phase for task %s took %s milliseconds",
          mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS));
    }
    taskLifecycleService.onPostExecute(mapper);
  }
  return collector;
}
@Override
public void purge(Executor threadPool, final PurgeListener listener) {
  ExecutorAllCompletionService eacs = new ExecutorAllCompletionService(threadPool);
  EntityManager em = emf.createEntityManager();
  try {
    CriteriaBuilder cb = em.getCriteriaBuilder();
    CriteriaQuery<MetadataEntity> cq = cb.createQuery(MetadataEntity.class);
    Root root = cq.from(MetadataEntity.class);
    long currentTime = timeService.wallClockTime();
    cq.where(cb.le(root.get(MetadataEntity.EXPIRATION), currentTime));

    for (MetadataEntity metadata : em.createQuery(cq).getResultList()) {
      EntityTransaction txn = em.getTransaction();
      final Object key;
      try {
        key = marshaller.objectFromByteBuffer(metadata.name);
      } catch (Exception e) {
        throw new JpaStoreException("Cannot unmarshall key", e);
      }
      long txnBegin = timeService.time();
      txn.begin();
      try {
        long metadataFindBegin = timeService.time();
        metadata = em.find(MetadataEntity.class, metadata.name);
        stats.addMetadataFind(timeService.time() - metadataFindBegin);
        // Re-check the expiration inside the transaction - I hope the write skew check
        // is done here.
        if (metadata.expiration > currentTime) {
          txn.rollback();
          continue;
        }
        long entityFindBegin = timeService.time();
        Object entity = em.find(configuration.entityClass(), key);
        stats.addEntityFind(timeService.time() - entityFindBegin);
        if (entity != null) { // The entry may have been removed concurrently.
          long entityRemoveBegin = timeService.time();
          em.remove(entity);
          stats.addEntityRemove(timeService.time() - entityRemoveBegin);
        }
        long metadataRemoveBegin = timeService.time();
        em.remove(metadata);
        stats.addMetadataRemove(timeService.time() - metadataRemoveBegin);
        txn.commit();
        stats.addRemoveTxCommitted(timeService.time() - txnBegin);

        if (trace)
          log.trace("Expired " + key + " -> " + entity + " (" + toString(metadata) + ")");
        if (listener != null) {
          eacs.submit(
              new Runnable() {
                @Override
                public void run() {
                  listener.entryPurged(key);
                }
              },
              null);
        }
      } catch (RuntimeException e) {
        stats.addRemoveTxFailed(timeService.time() - txnBegin);
        throw e;
      } finally {
        if (txn != null && txn.isActive()) {
          txn.rollback();
        }
      }
    }
  } finally {
    em.close();
  }
  eacs.waitUntilAllCompleted();
  if (eacs.isExceptionThrown()) {
    throw new JpaStoreException(eacs.getFirstException());
  }
}
private boolean launchNewDistTask() {
  long duration = ts.timeDuration(statsUpdateTimestamp, ts.time(), TimeUnit.MILLISECONDS);
  return duration > staleStatsTreshold;
}
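// A minimal, self-contained sketch of the staleness gate that launchNewDistTask() and
// fetchClusterWideStatsIfNeeded() implement above. The TimeService interface here is a
// stand-in modelled on how it is used in these snippets (time() returns a monotonic
// nanosecond reading); the class name, the shouldRefresh() helper, and the 1000 ms
// threshold are assumptions for illustration only.
import java.util.concurrent.TimeUnit;

final class StaleStatsGate {
  interface TimeService {
    long time(); // monotonic nanoseconds, e.g. System.nanoTime()

    default long timeDuration(long startNanos, long endNanos, TimeUnit unit) {
      return unit.convert(endNanos - startNanos, TimeUnit.NANOSECONDS);
    }
  }

  private final TimeService ts = System::nanoTime;
  private final long staleStatsThresholdMillis = 1000; // hypothetical refresh interval
  private long statsUpdateTimestamp;

  StaleStatsGate() {
    // Start "stale" so the first caller triggers a refresh.
    statsUpdateTimestamp =
        ts.time() - TimeUnit.MILLISECONDS.toNanos(staleStatsThresholdMillis + 1);
  }

  // Returns true at most once per threshold window, so the expensive cluster-wide
  // statistics refresh is only launched when the cached values have gone stale.
  synchronized boolean shouldRefresh() {
    long now = ts.time();
    if (ts.timeDuration(statsUpdateTimestamp, now, TimeUnit.MILLISECONDS)
        > staleStatsThresholdMillis) {
      statsUpdateTimestamp = now;
      return true;
    }
    return false;
  }
}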
/**
 * With the current state transfer implementation it is possible for a transaction to be prepared
 * several times on a remote node. This might cause leaks, e.g. if the transaction is prepared,
 * committed and prepared again. Once marked as completed (because of commit or rollback), any
 * further prepare received for that transaction is discarded.
 */
public void markTransactionCompleted(GlobalTransaction globalTx) {
  if (totalOrder) {
    return;
  }
  completedTransactions.put(globalTx, timeService.time());
}
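// A sketch of how the map maintained by markTransactionCompleted() is typically consulted,
// per the Javadoc above: a prepare that arrives after the transaction was marked completed
// must be dropped rather than re-registered, otherwise the re-created remote transaction
// entry would leak. Both isTransactionCompleted() and onRemotePrepare() are hypothetical
// helpers for illustration; the real class may expose different methods.
private boolean isTransactionCompleted(GlobalTransaction gtx) {
  return completedTransactions.containsKey(gtx);
}

public void onRemotePrepare(GlobalTransaction gtx, Runnable registerAndPrepare) {
  if (isTransactionCompleted(gtx)) {
    // Already committed or rolled back; discard the late (re-sent) prepare.
    return;
  }
  registerAndPrepare.run();
}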
protected <KIn, VIn, KOut, VOut> Set<KOut> combine(
    MapCombineCommand<KIn, VIn, KOut, VOut> mcc, CollectableCollector<KOut, VOut> collector)
    throws Exception {
  String taskId = mcc.getTaskId();
  boolean emitCompositeIntermediateKeys = mcc.isEmitCompositeIntermediateKeys();
  Reducer<KOut, VOut> combiner = mcc.getCombiner();
  Set<KOut> mapPhaseKeys = new HashSet<KOut>();
  Cache<Object, DeltaAwareList<VOut>> tmpCache = null;
  if (emitCompositeIntermediateKeys) {
    tmpCache = cacheManager.getCache(DEFAULT_TMP_CACHE_CONFIGURATION_NAME);
  } else {
    tmpCache = cacheManager.getCache(taskId);
  }
  if (tmpCache == null) {
    throw new IllegalStateException(
        "Temporary cache for MapReduceTask " + taskId + " not found on " + cdl.getAddress());
  }
  DistributionManager dm = tmpCache.getAdvancedCache().getDistributionManager();
  if (combiner != null) {
    Cache<?, ?> cache = cacheManager.getCache(mcc.getCacheName());
    log.tracef("For m/r task %s invoking combiner %s at %s", taskId, mcc, cdl.getAddress());
    MapReduceTaskLifecycleService taskLifecycleService =
        MapReduceTaskLifecycleService.getInstance();
    Map<KOut, VOut> combinedMap = new ConcurrentHashMap<KOut, VOut>();
    long start = log.isTraceEnabled() ? timeService.time() : 0;
    try {
      taskLifecycleService.onPreExecute(combiner, cache);
      Map<KOut, List<VOut>> collectedValues = collector.collectedValues();
      for (Entry<KOut, List<VOut>> e : collectedValues.entrySet()) {
        List<VOut> list = e.getValue();
        VOut combined;
        if (list.size() > 1) {
          combined = combiner.reduce(e.getKey(), list.iterator());
        } else {
          combined = list.get(0);
        }
        combinedMap.put(e.getKey(), combined);
        log.tracef(
            "For m/r task %s combined %s to %s at %s",
            taskId, e.getKey(), combined, cdl.getAddress());
      }
    } finally {
      if (log.isTraceEnabled()) {
        log.tracef(
            "Combine for task %s took %s milliseconds",
            mcc.getTaskId(), timeService.timeDuration(start, TimeUnit.MILLISECONDS));
      }
      taskLifecycleService.onPostExecute(combiner);
    }
    Map<Address, List<KOut>> keysToNodes =
        mapKeysToNodes(dm, taskId, combinedMap.keySet(), emitCompositeIntermediateKeys);
    start = log.isTraceEnabled() ? timeService.time() : 0;
    try {
      for (Entry<Address, List<KOut>> entry : keysToNodes.entrySet()) {
        List<KOut> keysHashedToAddress = entry.getValue();
        try {
          log.tracef(
              "For m/r task %s migrating intermediate keys %s to %s",
              taskId, keysHashedToAddress, entry.getKey());
          for (KOut key : keysHashedToAddress) {
            VOut value = combinedMap.get(key);
            DeltaAwareList<VOut> delta = new DeltaAwareList<VOut>(value);
            if (emitCompositeIntermediateKeys) {
              tmpCache.put(new IntermediateCompositeKey<KOut>(taskId, key), delta);
            } else {
              tmpCache.put(key, delta);
            }
            mapPhaseKeys.add(key);
          }
        } catch (Exception e) {
          throw new CacheException(
              "Could not move intermediate keys/values for M/R task " + taskId, e);
        }
      }
    } finally {
      if (log.isTraceEnabled()) {
        log.tracef(
            "Migrating keys for task %s took %s milliseconds (Migrated %s keys)",
            mcc.getTaskId(),
            timeService.timeDuration(start, TimeUnit.MILLISECONDS),
            mapPhaseKeys.size());
      }
    }
  } else {
    // Combiner not specified, so insert each key/uncombined-list pair into the tmp cache.
    Map<KOut, List<VOut>> collectedValues = collector.collectedValues();
    Map<Address, List<KOut>> keysToNodes =
        mapKeysToNodes(dm, taskId, collectedValues.keySet(), emitCompositeIntermediateKeys);
    long start = log.isTraceEnabled() ? timeService.time() : 0;
    try {
      for (Entry<Address, List<KOut>> entry : keysToNodes.entrySet()) {
        List<KOut> keysHashedToAddress = entry.getValue();
        try {
          log.tracef(
              "For m/r task %s migrating intermediate keys %s to %s",
              taskId, keysHashedToAddress, entry.getKey());
          for (KOut key : keysHashedToAddress) {
            List<VOut> value = collectedValues.get(key);
            DeltaAwareList<VOut> delta = new DeltaAwareList<VOut>(value);
            if (emitCompositeIntermediateKeys) {
              tmpCache.put(new IntermediateCompositeKey<KOut>(taskId, key), delta);
            } else {
              tmpCache.put(key, delta);
            }
            mapPhaseKeys.add(key);
          }
        } catch (Exception e) {
          throw new CacheException(
              "Could not move intermediate keys/values for M/R task " + taskId, e);
        }
      }
    } finally {
      if (log.isTraceEnabled()) {
        log.tracef(
            "Migrating keys for task %s took %s milliseconds (Migrated %s keys)",
            mcc.getTaskId(),
            timeService.timeDuration(start, TimeUnit.MILLISECONDS),
            mapPhaseKeys.size());
      }
    }
  }
  return mapPhaseKeys;
}
public boolean delete(Object key) {
  if (!isValidKeyType(key)) {
    return false;
  }
  EntityManager em = emf.createEntityManager();
  try {
    long entityFindBegin = timeService.time();
    Object entity = em.find(configuration.entityClass(), key);
    stats.addEntityFind(timeService.time() - entityFindBegin);
    if (entity == null) {
      return false;
    }
    MetadataEntity metadata = null;
    if (configuration.storeMetadata()) {
      byte[] keyBytes;
      try {
        keyBytes = marshaller.objectToByteBuffer(key);
      } catch (Exception e) {
        throw new JpaStoreException("Failed to marshall key", e);
      }
      long metadataFindBegin = timeService.time();
      metadata = em.find(MetadataEntity.class, keyBytes);
      stats.addMetadataFind(timeService.time() - metadataFindBegin);
    }

    EntityTransaction txn = em.getTransaction();
    if (trace) log.trace("Removing " + entity + " (" + toString(metadata) + ")");
    long txnBegin = timeService.time();
    txn.begin();
    try {
      long entityRemoveBegin = timeService.time();
      em.remove(entity);
      stats.addEntityRemove(timeService.time() - entityRemoveBegin);
      if (metadata != null) {
        long metadataRemoveBegin = timeService.time();
        em.remove(metadata);
        stats.addMetadataRemove(timeService.time() - metadataRemoveBegin);
      }
      txn.commit();
      stats.addRemoveTxCommitted(timeService.time() - txnBegin);
      return true;
    } catch (Exception e) {
      stats.addRemoveTxFailed(timeService.time() - txnBegin);
      throw new JpaStoreException("Exception caught in delete()", e);
    } finally {
      if (txn != null && txn.isActive()) txn.rollback();
    }
  } finally {
    em.close();
  }
}
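// The JPA store methods above (load, write, contains, purge, delete) all share one timing
// idiom: snapshot timeService.time() around each individual JPA operation, snapshot it again
// around the whole transaction, and record the deltas into per-operation and
// committed/failed transaction buckets. A condensed sketch of that idiom; the TimeService
// and Stats interfaces and the timedFind() helper are hypothetical illustrations, not the
// store's real API.
import javax.persistence.EntityManager;
import javax.persistence.EntityTransaction;

final class TimedJpaRead {
  interface TimeService {
    long time(); // monotonic nanoseconds, as used throughout the snippets above
  }

  interface Stats {
    void addEntityFind(long nanos);

    void addReadTxCommitted(long nanos);

    void addReadTxFailed(long nanos);
  }

  static <T> T timedFind(
      EntityManager em, Class<T> type, Object key, Stats stats, TimeService ts) {
    EntityTransaction txn = em.getTransaction();
    long txnBegin = ts.time(); // baseline for the whole-transaction buckets
    txn.begin();
    try {
      long findBegin = ts.time();
      T entity = em.find(type, key);
      stats.addEntityFind(ts.time() - findBegin); // per-operation cost
      txn.commit();
      stats.addReadTxCommitted(ts.time() - txnBegin); // whole-tx cost on success
      return entity;
    } catch (RuntimeException e) {
      stats.addReadTxFailed(ts.time() - txnBegin); // whole-tx cost on failure
      throw e;
    } finally {
      if (txn.isActive()) txn.rollback(); // commit failed or an earlier call threw
    }
  }
}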