/** * Waits until the total number of events processed is equal to or greater than the argument passed. * * @param cnt Number of events to wait for. * @param timeout Timeout to wait. * @return {@code True} if successfully waited, {@code false} if timeout happened. * @throws InterruptedException If the thread is interrupted. */ public synchronized boolean awaitEvents(int cnt, long timeout) throws InterruptedException { long start = U.currentTimeMillis(); long now = start; while (start + timeout > now) { if (evtCnt >= cnt) return true; wait(start + timeout - now); now = U.currentTimeMillis(); } return false; }
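/*
 * A minimal counterpart sketch (not from the source): awaitEvents() above only works if some
 * producer increments the evtCnt field it polls and notifies waiters on the same monitor.
 * The method name onEvent() is hypothetical; the synchronized/notifyAll pairing is the point.
 */
public synchronized void onEvent() {
    evtCnt++;

    // Wake any threads blocked in awaitEvents(); they re-check evtCnt >= cnt before returning.
    notifyAll();
}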
/** * Log task mapped. * * @param log Logger. * @param clazz Task class. * @param nodes Mapped nodes. */ public static void logMapped( @Nullable IgniteLogger log, Class<?> clazz, Collection<ClusterNode> nodes) { log0( log, U.currentTimeMillis(), String.format("[%s]: MAPPED: %s", clazz.getSimpleName(), U.toShortString(nodes))); }
/** {@inheritDoc} */ @Override public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds) throws GridSpiException { assert !F.isEmpty(nodeIds); long now = U.currentTimeMillis(); Collection<UUID> expired = new LinkedList<>(); for (UUID id : nodeIds) { GridNodeMetrics nodeMetrics = metricsMap.get(id); Long ts = tsMap.get(id); if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime) expired.add(id); } if (!expired.isEmpty()) { Map<UUID, GridNodeMetrics> refreshed = metrics0(expired); for (UUID id : refreshed.keySet()) tsMap.put(id, now); metricsMap.putAll(refreshed); } return F.view(metricsMap, F.contains(nodeIds)); }
/** * Cleans up all events that either exceed the queue size or exceed the time-to-live value. It * does nothing if someone else is cleaning up the queue (the lock is locked) or if there are queue * readers (readersNum > 0). */ private void cleanupQueue() { long now = U.currentTimeMillis(); long queueOversize = evts.sizex() - expireCnt; for (int i = 0; i < queueOversize && evts.sizex() > expireCnt; i++) { GridEvent expired = evts.poll(); if (log.isDebugEnabled()) log.debug("Event expired by count: " + expired); } while (true) { ConcurrentLinkedDeque8.Node<GridEvent> node = evts.peekx(); if (node == null) // Queue is empty. break; GridEvent evt = node.item(); if (evt == null) // Competing with another thread. continue; if (now - evt.timestamp() < expireAgeMs) break; if (evts.unlinkx(node) && log.isDebugEnabled()) log.debug("Event expired by age: " + node.item()); } }
/** {@inheritDoc} */ @Override protected final boolean tryReleaseShared(int ignore) { endTime = U.currentTimeMillis(); // Always signal after setting final done status. return true; }
/** {@inheritDoc} */ @Nullable @Override public Map<? extends GridComputeJob, GridNode> map( List<GridNode> subgrid, @Nullable GridBiTuple<Set<UUID>, A> arg) throws GridException { assert arg != null; assert arg.get1() != null; start = U.currentTimeMillis(); boolean debug = debugState(g); if (debug) logStart(g.log(), getClass(), start); Set<UUID> nodeIds = arg.get1(); Map<GridComputeJob, GridNode> map = U.newHashMap(nodeIds.size()); try { taskArg = arg.get2(); for (GridNode node : subgrid) if (nodeIds.contains(node.id())) map.put(job(taskArg), node); return map; } finally { if (debug) logMapped(g.log(), getClass(), map.values()); } }
/** Transaction commit callback. */ public void onTxCommit() { commitTime = U.currentTimeMillis(); txCommits++; if (delegate != null) delegate.onTxCommit(); }
/** Transaction rollback callback. */ public void onTxRollback() { rollbackTime = U.currentTimeMillis(); txRollbacks++; if (delegate != null) delegate.onTxRollback(); }
/** * @param id Timeout ID. * @param timeout Timeout for this object. */ protected GridTimeoutObjectAdapter(GridUuid id, long timeout) { this.id = id; long endTime = timeout >= 0 ? U.currentTimeMillis() + timeout : Long.MAX_VALUE; this.endTime = endTime >= 0 ? endTime : Long.MAX_VALUE; }
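/*
 * Worked example (illustration only, not from the source) of why the constructor above clamps
 * endTime: adding a near-MAX_VALUE timeout to the current time overflows long and produces a
 * negative deadline, which would make the timeout object expire immediately.
 */
public class EndTimeOverflowExample {
    public static void main(String[] args) {
        long now = 1_700_000_000_000L;          // pretend "current" milliseconds
        long timeout = Long.MAX_VALUE - 1_000L; // effectively "no timeout"

        long endTime = now + timeout;           // overflows long: result is negative

        System.out.println("raw endTime     = " + endTime);

        // Same clamp as in GridTimeoutObjectAdapter: treat overflow as "never expires".
        endTime = endTime >= 0 ? endTime : Long.MAX_VALUE;

        System.out.println("clamped endTime = " + endTime); // Long.MAX_VALUE
    }
}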
/** Cache write callback. */ public void onWrite() { writeTime = U.currentTimeMillis(); writes++; if (delegate != null) delegate.onWrite(); }
/** * Log finished. * * @param log Logger. * @param clazz Class. * @param start Start time. */ public static void logFinish(@Nullable IgniteLogger log, Class<?> clazz, long start) { final long end = U.currentTimeMillis(); log0( log, end, String.format( "[%s]: FINISHED, duration: %s", clazz.getSimpleName(), formatDuration(end - start))); }
/** * Cache read callback. * * @param isHit Hit or miss flag. */ public void onRead(boolean isHit) { readTime = U.currentTimeMillis(); reads++; if (isHit) hits++; else misses++; if (delegate != null) delegate.onRead(isHit); }
/** * Log message. * * @param log Logger. * @param msg Message to log. * @param clazz Class. * @param start Start time. * @return Time when the message was logged. */ public static long log(@Nullable IgniteLogger log, String msg, Class<?> clazz, long start) { final long end = U.currentTimeMillis(); log0( log, end, String.format( "[%s]: %s, duration: %s", clazz.getSimpleName(), msg, formatDuration(end - start))); return end; }
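/*
 * Hypothetical call site for the debug-logging helpers above (log(), logFinish()). The utility
 * class name TaskDebugLog, the task class MyTask and the doWork() workload are illustrative
 * placeholders, and an IgniteLogger instance is assumed to be available as "log".
 */
long start = U.currentTimeMillis();

doWork(); // some unit of work being timed

// Logs "[MyTask]: PROCESSED, duration: ..." and returns the timestamp it used.
long processedAt = TaskDebugLog.log(log, "PROCESSED", MyTask.class, start);

// Logs "[MyTask]: FINISHED, duration: ..." measured from the same start time.
TaskDebugLog.logFinish(log, MyTask.class, start);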
/** * Flushes every internal buffer if the buffer was flushed before the passed-in threshold. * * <p>Does not wait for the result and does not fail on errors, assuming that this method is * called periodically. */ @Override public void tryFlush() throws GridInterruptedException { if (!busyLock.enterBusy()) return; try { for (Buffer buf : bufMappings.values()) buf.flush(); lastFlushTime = U.currentTimeMillis(); } finally { leaveBusy(); } }
/** * Creates node shadow adapter. * * @param node Node. */ GridDiscoveryNodeShadowAdapter(GridNode node) { assert node != null; created = U.currentTimeMillis(); id = node.id(); attrs = Collections.unmodifiableMap(node.attributes()); addrs = Collections.unmodifiableCollection(node.addresses()); hostNames = Collections.unmodifiableCollection(node.hostNames()); order = node.order(); lastMetrics = node.metrics(); daemon = "true".equalsIgnoreCase(this.<String>attribute(ATTR_DAEMON)); }
/** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { nodeId = GridUtils.readUuid(in); ver = CU.readVersion(in); timeout = in.readLong(); threadId = in.readLong(); id = in.readLong(); short flags = in.readShort(); mask(OWNER, OWNER.get(flags)); mask(USED, USED.get(flags)); mask(TX, TX.get(flags)); ts = U.currentTimeMillis(); }
/** * @param parent Parent entry. * @param nodeId Requesting node ID. * @param otherNodeId Near node ID. * @param otherVer Other version. * @param threadId Requesting thread ID. * @param ver Cache version. * @param timeout Maximum wait time. * @param loc {@code True} if the lock is local. * @param reentry {@code True} if candidate is for reentry. * @param tx Transaction flag. * @param singleImplicit Single-key-implicit-transaction flag. * @param nearLoc Near-local flag. * @param dhtLoc DHT local flag. */ public GridCacheMvccCandidate( GridCacheEntryEx<K, ?> parent, UUID nodeId, @Nullable UUID otherNodeId, @Nullable GridCacheVersion otherVer, long threadId, GridCacheVersion ver, long timeout, boolean loc, boolean reentry, boolean tx, boolean singleImplicit, boolean nearLoc, boolean dhtLoc) { assert nodeId != null; assert ver != null; assert parent != null; this.parent = parent; this.nodeId = nodeId; this.otherNodeId = otherNodeId; this.otherVer = otherVer; this.threadId = threadId; this.ver = ver; this.timeout = timeout; mask(LOCAL, loc); mask(REENTRY, reentry); mask(TX, tx); mask(SINGLE_IMPLICIT, singleImplicit); mask(NEAR_LOCAL, nearLoc); mask(DHT_LOCAL, dhtLoc); ts = U.currentTimeMillis(); id = IDGEN.incrementAndGet(); }
/** {@inheritDoc} */ @Override public long getDelay(TimeUnit unit) { return unit.convert(nextFlushTime() - U.currentTimeMillis(), TimeUnit.MILLISECONDS); }
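/*
 * Sketch of the kind of flusher loop the Delayed implementation above enables (the actual
 * flusher thread is not shown in this source, so this is an assumption): loaders become
 * "expired" in the DelayQueue once nextFlushTime() has passed, so take() wakes up exactly
 * when an auto-flush is due.
 */
void runFlusher(DelayQueue<GridDataLoaderImpl<?, ?>> flushQ) throws InterruptedException {
    while (!Thread.currentThread().isInterrupted()) {
        GridDataLoaderImpl<?, ?> ldr = flushQ.take(); // blocks until some loader is due

        try {
            if (!ldr.isClosed()) {
                ldr.tryFlush();    // best-effort flush; resets lastFlushTime

                flushQ.offer(ldr); // re-arm for the next auto-flush interval
            }
        }
        catch (GridInterruptedException ignored) {
            Thread.currentThread().interrupt();
        }
    }
}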
/** Data loader implementation. */ public class GridDataLoaderImpl<K, V> implements GridDataLoader<K, V>, Delayed { /** Cache updater. */ private GridDataLoadCacheUpdater<K, V> updater = GridDataLoadCacheUpdaters.individual(); /** */ private byte[] updaterBytes; /** Max remap count before issuing an error. */ private static final int MAX_REMAP_CNT = 32; /** Log reference. */ private static final AtomicReference<GridLogger> logRef = new AtomicReference<>(); /** Cache name ({@code null} for default cache). */ private final String cacheName; /** Per-node buffer size. */ @SuppressWarnings("FieldAccessedSynchronizedAndUnsynchronized") private int bufSize = DFLT_PER_NODE_BUFFER_SIZE; /** */ private int parallelOps = DFLT_MAX_PARALLEL_OPS; /** */ private long autoFlushFreq; /** Mapping. */ @GridToStringInclude private ConcurrentMap<UUID, Buffer> bufMappings = new ConcurrentHashMap8<>(); /** Logger. */ private GridLogger log; /** Discovery listener. */ private final GridLocalEventListener discoLsnr; /** Context. */ private final GridKernalContext ctx; /** Communication topic for responses. */ private final Object topic; /** */ private byte[] topicBytes; /** {@code True} if data loader has been cancelled. */ private volatile boolean cancelled; /** Active futures of this data loader. */ @GridToStringInclude private final Collection<GridFuture<?>> activeFuts = new GridConcurrentHashSet<>(); /** Closure to remove from active futures. */ @GridToStringExclude private final GridInClosure<GridFuture<?>> rmvActiveFut = new GridInClosure<GridFuture<?>>() { @Override public void apply(GridFuture<?> t) { boolean rmv = activeFuts.remove(t); assert rmv; } }; /** Job peer deploy aware. */ private volatile GridPeerDeployAware jobPda; /** Deployment class. */ private Class<?> depCls; /** Future to track loading finish. */ private final GridFutureAdapter<?> fut; /** Busy lock. */ private final GridSpinBusyLock busyLock = new GridSpinBusyLock(); /** Closed flag. */ private final AtomicBoolean closed = new AtomicBoolean(); /** */ private volatile long lastFlushTime = U.currentTimeMillis(); /** */ private final DelayQueue<GridDataLoaderImpl<K, V>> flushQ; /** * @param ctx Grid kernal context. * @param cacheName Cache name. * @param flushQ Flush queue. */ public GridDataLoaderImpl( final GridKernalContext ctx, @Nullable final String cacheName, DelayQueue<GridDataLoaderImpl<K, V>> flushQ) { assert ctx != null; this.ctx = ctx; this.cacheName = cacheName; this.flushQ = flushQ; log = U.logger(ctx, logRef, GridDataLoaderImpl.class); discoLsnr = new GridLocalEventListener() { @Override public void onEvent(GridEvent evt) { assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT; GridDiscoveryEvent discoEvt = (GridDiscoveryEvent) evt; UUID id = discoEvt.eventNodeId(); // Remap regular mappings. final Buffer buf = bufMappings.remove(id); if (buf != null) { // Only async notification is possible since // discovery thread may be trapped otherwise. ctx.closure() .callLocalSafe( new Callable<Object>() { @Override public Object call() throws Exception { buf.onNodeLeft(); return null; } }, true /* system pool */); } } }; ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT); // Generate unique topic for this loader. 
topic = TOPIC_DATALOAD.topic(GridUuid.fromUuid(ctx.localNodeId())); ctx.io() .addMessageListener( topic, new GridMessageListener() { @Override public void onMessage(UUID nodeId, Object msg) { assert msg instanceof GridDataLoadResponse; GridDataLoadResponse res = (GridDataLoadResponse) msg; if (log.isDebugEnabled()) log.debug("Received data load response: " + res); Buffer buf = bufMappings.get(nodeId); if (buf != null) buf.onResponse(res); else if (log.isDebugEnabled()) log.debug("Ignoring response since node has left [nodeId=" + nodeId + ", "); } }); if (log.isDebugEnabled()) log.debug("Added response listener within topic: " + topic); fut = new GridDataLoaderFuture(ctx, this); } /** Enters busy lock. */ private void enterBusy() { if (!busyLock.enterBusy()) throw new IllegalStateException("Data loader has been closed."); } /** Leaves busy lock. */ private void leaveBusy() { busyLock.leaveBusy(); } /** {@inheritDoc} */ @Override public GridFuture<?> future() { return fut; } /** {@inheritDoc} */ @Override public void deployClass(Class<?> depCls) { this.depCls = depCls; } /** {@inheritDoc} */ @Override public void updater(GridDataLoadCacheUpdater<K, V> updater) { A.notNull(updater, "updater"); this.updater = updater; } /** {@inheritDoc} */ @Override public boolean isolated() { return updater != GridDataLoadCacheUpdaters.individual(); } /** {@inheritDoc} */ @Override public void isolated(boolean isolated) throws GridException { if (isolated()) return; GridNode node = F.first(ctx.grid().forCache(cacheName).nodes()); if (node == null) throw new GridException("Failed to get node for cache: " + cacheName); GridCacheAttributes a = U.cacheAttributes(node, cacheName); assert a != null; updater = a.atomicityMode() == GridCacheAtomicityMode.ATOMIC ? GridDataLoadCacheUpdaters.<K, V>batched() : GridDataLoadCacheUpdaters.<K, V>groupLocked(); } /** {@inheritDoc} */ @Override @Nullable public String cacheName() { return cacheName; } /** {@inheritDoc} */ @Override public int perNodeBufferSize() { return bufSize; } /** {@inheritDoc} */ @Override public void perNodeBufferSize(int bufSize) { A.ensure(bufSize > 0, "bufSize > 0"); this.bufSize = bufSize; } /** {@inheritDoc} */ @Override public int perNodeParallelLoadOperations() { return parallelOps; } /** {@inheritDoc} */ @Override public void perNodeParallelLoadOperations(int parallelOps) { this.parallelOps = parallelOps; } /** {@inheritDoc} */ @Override public long autoFlushFrequency() { return autoFlushFreq; } /** {@inheritDoc} */ @Override public void autoFlushFrequency(long autoFlushFreq) { A.ensure(autoFlushFreq >= 0, "autoFlushFreq >= 0"); long old = this.autoFlushFreq; if (autoFlushFreq != old) { this.autoFlushFreq = autoFlushFreq; if (autoFlushFreq != 0 && old == 0) flushQ.add(this); else if (autoFlushFreq == 0) flushQ.remove(this); } } /** {@inheritDoc} */ @Override public GridFuture<?> addData(Map<K, V> entries) throws IllegalStateException { A.notNull(entries, "entries"); return addData(entries.entrySet()); } /** {@inheritDoc} */ @Override public GridFuture<?> addData(Collection<? 
extends Map.Entry<K, V>> entries) { A.notEmpty(entries, "entries"); enterBusy(); try { GridFutureAdapter<Object> resFut = new GridFutureAdapter<>(ctx); activeFuts.add(resFut); resFut.listenAsync(rmvActiveFut); Collection<K> keys = new GridConcurrentHashSet<>(entries.size(), 1.0f, 16); for (Map.Entry<K, V> entry : entries) keys.add(entry.getKey()); load0(entries, resFut, keys, 0); return resFut; } finally { leaveBusy(); } } /** {@inheritDoc} */ @Override public GridFuture<?> addData(Map.Entry<K, V> entry) throws GridException, IllegalStateException { A.notNull(entry, "entry"); return addData(F.asList(entry)); } /** {@inheritDoc} */ @Override public GridFuture<?> addData(K key, V val) throws GridException, IllegalStateException { A.notNull(key, "key"); return addData(new Entry0<>(key, val)); } /** {@inheritDoc} */ @Override public GridFuture<?> removeData(K key) throws GridException, IllegalStateException { return addData(key, null); } /** * @param entries Entries. * @param resFut Result future. * @param activeKeys Active keys. * @param remaps Remaps count. */ private void load0( Collection<? extends Map.Entry<K, V>> entries, final GridFutureAdapter<Object> resFut, final Collection<K> activeKeys, final int remaps) { assert entries != null; if (remaps >= MAX_REMAP_CNT) { resFut.onDone(new GridException("Failed to finish operation (too many remaps): " + remaps)); return; } Map<GridNode, Collection<Map.Entry<K, V>>> mappings = new HashMap<>(); boolean initPda = ctx.deploy().enabled() && jobPda == null; for (Map.Entry<K, V> entry : entries) { GridNode node; try { K key = entry.getKey(); assert key != null; if (initPda) { jobPda = new DataLoaderPda(key, entry.getValue(), updater); initPda = false; } node = ctx.affinity().mapKeyToNode(cacheName, key); } catch (GridException e) { resFut.onDone(e); return; } if (node == null) { resFut.onDone( new GridTopologyException( "Failed to map key to node " + "(no nodes with cache found in topology) [infos=" + entries.size() + ", cacheName=" + cacheName + ']')); return; } Collection<Map.Entry<K, V>> col = mappings.get(node); if (col == null) mappings.put(node, col = new ArrayList<>()); col.add(entry); } for (final Map.Entry<GridNode, Collection<Map.Entry<K, V>>> e : mappings.entrySet()) { final UUID nodeId = e.getKey().id(); Buffer buf = bufMappings.get(nodeId); if (buf == null) { Buffer old = bufMappings.putIfAbsent(nodeId, buf = new Buffer(e.getKey())); if (old != null) buf = old; } final Collection<Map.Entry<K, V>> entriesForNode = e.getValue(); GridInClosure<GridFuture<?>> lsnr = new GridInClosure<GridFuture<?>>() { @Override public void apply(GridFuture<?> t) { try { t.get(); for (Map.Entry<K, V> e : entriesForNode) activeKeys.remove(e.getKey()); if (activeKeys.isEmpty()) resFut.onDone(); } catch (GridException e1) { if (log.isDebugEnabled()) log.debug("Future finished with error [nodeId=" + nodeId + ", err=" + e1 + ']'); if (cancelled) { resFut.onDone( new GridException( "Data loader has been cancelled: " + GridDataLoaderImpl.this, e1)); } else load0(entriesForNode, resFut, activeKeys, remaps + 1); } } }; GridFutureAdapter<?> f; try { f = buf.update(entriesForNode, lsnr); } catch (GridInterruptedException e1) { resFut.onDone(e1); return; } if (ctx.discovery().node(nodeId) == null) { if (bufMappings.remove(nodeId, buf)) buf.onNodeLeft(); if (f != null) f.onDone( new GridTopologyException( "Failed to wait for request completion " + "(node has left): " + nodeId)); } } } /** * Performs flush. * * @throws GridException If failed. 
*/ private void doFlush() throws GridException { lastFlushTime = U.currentTimeMillis(); List<GridFuture> activeFuts0 = null; int doneCnt = 0; for (GridFuture<?> f : activeFuts) { if (!f.isDone()) { if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2)); activeFuts0.add(f); } else { f.get(); doneCnt++; } } if (activeFuts0 == null || activeFuts0.isEmpty()) return; while (true) { Queue<GridFuture<?>> q = null; for (Buffer buf : bufMappings.values()) { GridFuture<?> flushFut = buf.flush(); if (flushFut != null) { if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2); q.add(flushFut); } } if (q != null) { assert !q.isEmpty(); boolean err = false; for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) { try { fut.get(); } catch (GridException e) { if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e); err = true; } } if (err) // Remaps needed - flush buffers. continue; } doneCnt = 0; for (int i = 0; i < activeFuts0.size(); i++) { GridFuture f = activeFuts0.get(i); if (f == null) doneCnt++; else if (f.isDone()) { f.get(); doneCnt++; activeFuts0.set(i, null); } else break; } if (doneCnt == activeFuts0.size()) return; } } /** {@inheritDoc} */ @SuppressWarnings("ForLoopReplaceableByForEach") @Override public void flush() throws GridException { enterBusy(); try { doFlush(); } finally { leaveBusy(); } } /** * Flushes every internal buffer if buffer was flushed before passed in threshold. * * <p>Does not wait for result and does not fail on errors assuming that this method should be * called periodically. */ @Override public void tryFlush() throws GridInterruptedException { if (!busyLock.enterBusy()) return; try { for (Buffer buf : bufMappings.values()) buf.flush(); lastFlushTime = U.currentTimeMillis(); } finally { leaveBusy(); } } /** * @param cancel {@code True} to close with cancellation. * @throws GridException If failed. */ @Override public void close(boolean cancel) throws GridException { if (!closed.compareAndSet(false, true)) return; busyLock.block(); if (log.isDebugEnabled()) log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']'); GridException e = null; try { // Assuming that no methods are called on this loader after this method is called. if (cancel) { cancelled = true; for (Buffer buf : bufMappings.values()) buf.cancelAll(); } else doFlush(); ctx.event().removeLocalEventListener(discoLsnr); ctx.io().removeMessageListener(topic); } catch (GridException e0) { e = e0; } fut.onDone(null, e); if (e != null) throw e; } /** @return {@code true} If the loader is closed. */ boolean isClosed() { return fut.isDone(); } /** {@inheritDoc} */ @Override public void close() throws GridException { close(false); } /** {@inheritDoc} */ @Override public String toString() { return S.toString(GridDataLoaderImpl.class, this); } /** {@inheritDoc} */ @Override public long getDelay(TimeUnit unit) { return unit.convert(nextFlushTime() - U.currentTimeMillis(), TimeUnit.MILLISECONDS); } /** @return Next flush time. */ private long nextFlushTime() { return lastFlushTime + autoFlushFreq; } /** {@inheritDoc} */ @Override public int compareTo(Delayed o) { return nextFlushTime() > ((GridDataLoaderImpl) o).nextFlushTime() ? 1 : -1; } /** */ private class Buffer { /** Node. */ private final GridNode node; /** Active futures. */ private final Collection<GridFuture<Object>> locFuts; /** Buffered entries. */ private List<Map.Entry<K, V>> entries; /** */ @GridToStringExclude private GridFutureAdapter<Object> curFut; /** Local node flag. 
*/ private final boolean isLocNode; /** ID generator. */ private final AtomicLong idGen = new AtomicLong(); /** Active futures. */ private final ConcurrentMap<Long, GridFutureAdapter<Object>> reqs; /** */ private final Semaphore sem; /** Closure to signal on task finish. */ @GridToStringExclude private final GridInClosure<GridFuture<Object>> signalC = new GridInClosure<GridFuture<Object>>() { @Override public void apply(GridFuture<Object> t) { signalTaskFinished(t); } }; /** @param node Node. */ Buffer(GridNode node) { assert node != null; this.node = node; locFuts = new GridConcurrentHashSet<>(); reqs = new ConcurrentHashMap8<>(); // Cache local node flag. isLocNode = node.equals(ctx.discovery().localNode()); entries = newEntries(); curFut = new GridFutureAdapter<>(ctx); curFut.listenAsync(signalC); sem = new Semaphore(parallelOps); } /** * @param newEntries Infos. * @param lsnr Listener for the operation future. * @throws GridInterruptedException If failed. * @return Future for operation. */ @Nullable GridFutureAdapter<?> update( Iterable<Map.Entry<K, V>> newEntries, GridInClosure<GridFuture<?>> lsnr) throws GridInterruptedException { List<Map.Entry<K, V>> entries0 = null; GridFutureAdapter<Object> curFut0; synchronized (this) { curFut0 = curFut; curFut0.listenAsync(lsnr); for (Map.Entry<K, V> entry : newEntries) entries.add(entry); if (entries.size() >= bufSize) { entries0 = entries; entries = newEntries(); curFut = new GridFutureAdapter<>(ctx); curFut.listenAsync(signalC); } } if (entries0 != null) { submit(entries0, curFut0); if (cancelled) curFut0.onDone( new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this)); } return curFut0; } /** @return Fresh collection with some space for outgrowth. */ private List<Map.Entry<K, V>> newEntries() { return new ArrayList<>((int) (bufSize * 1.2)); } /** * @return Future if any submitted. * @throws GridInterruptedException If thread has been interrupted. */ @Nullable GridFuture<?> flush() throws GridInterruptedException { List<Map.Entry<K, V>> entries0 = null; GridFutureAdapter<Object> curFut0 = null; synchronized (this) { if (!entries.isEmpty()) { entries0 = entries; curFut0 = curFut; entries = newEntries(); curFut = new GridFutureAdapter<>(ctx); curFut.listenAsync(signalC); } } if (entries0 != null) submit(entries0, curFut0); // Create compound future for this flush. GridCompoundFuture<Object, Object> res = null; for (GridFuture<Object> f : locFuts) { if (res == null) res = new GridCompoundFuture<>(ctx); res.add(f); } for (GridFuture<Object> f : reqs.values()) { if (res == null) res = new GridCompoundFuture<>(ctx); res.add(f); } if (res != null) res.markInitialized(); return res; } /** * Increments active tasks count. * * @throws GridInterruptedException If thread has been interrupted. */ private void incrementActiveTasks() throws GridInterruptedException { U.acquire(sem); } /** @param f Future that finished. */ private void signalTaskFinished(GridFuture<Object> f) { assert f != null; sem.release(); } /** * @param entries Entries to submit. * @param curFut Current future. * @throws GridInterruptedException If interrupted. 
*/ private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut) throws GridInterruptedException { assert entries != null; assert !entries.isEmpty(); assert curFut != null; incrementActiveTasks(); GridFuture<Object> fut; if (isLocNode) { fut = ctx.closure() .callLocalSafe( new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater), false); locFuts.add(fut); fut.listenAsync( new GridInClosure<GridFuture<Object>>() { @Override public void apply(GridFuture<Object> t) { try { boolean rmv = locFuts.remove(t); assert rmv; curFut.onDone(t.get()); } catch (GridException e) { curFut.onDone(e); } } }); } else { byte[] entriesBytes; try { entriesBytes = ctx.config().getMarshaller().marshal(entries); if (updaterBytes == null) { assert updater != null; updaterBytes = ctx.config().getMarshaller().marshal(updater); } if (topicBytes == null) topicBytes = ctx.config().getMarshaller().marshal(topic); } catch (GridException e) { U.error(log, "Failed to marshal (request will not be sent).", e); return; } GridDeployment dep = null; GridPeerDeployAware jobPda0 = null; if (ctx.deploy().enabled()) { try { jobPda0 = jobPda; assert jobPda0 != null; dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader()); } catch (GridException e) { U.error( log, "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(), e); return; } if (dep == null) U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass()); } long reqId = idGen.incrementAndGet(); fut = curFut; reqs.put(reqId, (GridFutureAdapter<Object>) fut); GridDataLoadRequest<Object, Object> req = new GridDataLoadRequest<>( reqId, topicBytes, cacheName, updaterBytes, entriesBytes, true, dep != null ? dep.deployMode() : null, dep != null ? jobPda0.deployClass().getName() : null, dep != null ? dep.userVersion() : null, dep != null ? dep.participants() : null, dep != null ? dep.classLoaderId() : null, dep == null); try { ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL); if (log.isDebugEnabled()) log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']'); } catch (GridException e) { if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id())) ((GridFutureAdapter<Object>) fut).onDone(e); else ((GridFutureAdapter<Object>) fut) .onDone( new GridTopologyException( "Failed to send " + "request (node has left): " + node.id())); } } } /** */ void onNodeLeft() { assert !isLocNode; assert bufMappings.get(node.id()) != this; if (log.isDebugEnabled()) log.debug("Forcibly completing futures (node has left): " + node.id()); Exception e = new GridTopologyException( "Failed to wait for request completion " + "(node has left): " + node.id()); for (GridFutureAdapter<Object> f : reqs.values()) f.onDone(e); // Make sure to complete current future. GridFutureAdapter<Object> curFut0; synchronized (this) { curFut0 = curFut; } curFut0.onDone(e); } /** @param res Response. */ void onResponse(GridDataLoadResponse res) { if (log.isDebugEnabled()) log.debug("Received data load response: " + res); GridFutureAdapter<?> f = reqs.remove(res.requestId()); if (f == null) { if (log.isDebugEnabled()) log.debug("Future for request has not been found: " + res.requestId()); return; } Throwable err = null; byte[] errBytes = res.errorBytes(); if (errBytes != null) { try { GridPeerDeployAware jobPda0 = jobPda; err = ctx.config() .getMarshaller() .unmarshal( errBytes, jobPda0 != null ? 
jobPda0.classLoader() : U.gridClassLoader()); } catch (GridException e) { f.onDone(null, new GridException("Failed to unmarshal response.", e)); return; } } f.onDone(null, err); if (log.isDebugEnabled()) log.debug( "Finished future [fut=" + f + ", reqId=" + res.requestId() + ", err=" + err + ']'); } /** */ void cancelAll() { GridException err = new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this); for (GridFuture<?> f : locFuts) { try { f.cancel(); } catch (GridException e) { U.error(log, "Failed to cancel mini-future.", e); } } for (GridFutureAdapter<?> f : reqs.values()) f.onDone(err); } /** {@inheritDoc} */ @Override public String toString() { int size; synchronized (this) { size = entries.size(); } return S.toString( Buffer.class, this, "entriesCnt", size, "locFutsSize", locFuts.size(), "reqsSize", reqs.size()); } } /** Data loader peer-deploy aware. */ private class DataLoaderPda implements GridPeerDeployAware { /** Deploy class. */ private Class<?> cls; /** Class loader. */ private ClassLoader ldr; /** Collection of objects to detect deploy class and class loader. */ private Collection<Object> objs; /** * Constructs data loader peer-deploy aware. * * @param objs Collection of objects to detect deploy class and class loader. */ private DataLoaderPda(Object... objs) { this.objs = Arrays.asList(objs); } /** {@inheritDoc} */ @Override public Class<?> deployClass() { if (cls == null) { Class<?> cls0 = null; if (depCls != null) cls0 = depCls; else { for (Iterator<Object> it = objs.iterator(); (cls0 == null || U.isJdk(cls0)) && it.hasNext(); ) { Object o = it.next(); if (o != null) cls0 = U.detectClass(o); } if (cls0 == null || U.isJdk(cls0)) cls0 = GridDataLoaderImpl.class; } assert cls0 != null : "Failed to detect deploy class [objs=" + objs + ']'; cls = cls0; } return cls; } /** {@inheritDoc} */ @Override public ClassLoader classLoader() { if (ldr == null) { ClassLoader ldr0 = deployClass().getClassLoader(); // Safety. if (ldr0 == null) ldr0 = U.gridClassLoader(); assert ldr0 != null : "Failed to detect classloader [objs=" + objs + ']'; ldr = ldr0; } return ldr; } } /** Entry. */ private static class Entry0<K, V> implements Map.Entry<K, V>, Externalizable { /** */ private K key; /** */ private V val; /** * @param key Key. * @param val Value. */ private Entry0(K key, @Nullable V val) { assert key != null; this.key = key; this.val = val; } /** For {@link Externalizable}. */ @SuppressWarnings("UnusedDeclaration") public Entry0() { // No-op. } /** {@inheritDoc} */ @Override public K getKey() { return key; } /** {@inheritDoc} */ @Override public V getValue() { return val; } /** {@inheritDoc} */ @Override public V setValue(V val) { throw new UnsupportedOperationException(); } /** {@inheritDoc} */ @Override public void writeExternal(ObjectOutput out) throws IOException { out.writeObject(key); out.writeObject(val); } /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { key = (K) in.readObject(); val = (V) in.readObject(); } } }
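/*
 * Hypothetical call site for the loader above, using only methods visible in this class. How
 * the GridDataLoader instance is obtained is outside this snippet (obtainLoader() is a
 * placeholder), and the surrounding method is assumed to declare "throws GridException".
 */
GridDataLoader<Integer, String> ldr = obtainLoader("partitioned"); // placeholder factory

try {
    ldr.perNodeBufferSize(1024);      // entries buffered per node before a batch is sent
    ldr.autoFlushFrequency(5_000);    // flush buffers at least every 5 seconds

    for (int i = 0; i < 100_000; i++)
        ldr.addData(i, "value-" + i); // buffered add; returns a future per batch

    ldr.flush();                      // block until everything buffered so far is loaded
}
finally {
    ldr.close(false);                 // flush remaining entries and release resources
}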
/** * Performs flush. * * @throws GridException If failed. */ private void doFlush() throws GridException { lastFlushTime = U.currentTimeMillis(); List<GridFuture> activeFuts0 = null; int doneCnt = 0; for (GridFuture<?> f : activeFuts) { if (!f.isDone()) { if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2)); activeFuts0.add(f); } else { f.get(); doneCnt++; } } if (activeFuts0 == null || activeFuts0.isEmpty()) return; while (true) { Queue<GridFuture<?>> q = null; for (Buffer buf : bufMappings.values()) { GridFuture<?> flushFut = buf.flush(); if (flushFut != null) { if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2); q.add(flushFut); } } if (q != null) { assert !q.isEmpty(); boolean err = false; for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) { try { fut.get(); } catch (GridException e) { if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e); err = true; } } if (err) // Remaps needed - flush buffers. continue; } doneCnt = 0; for (int i = 0; i < activeFuts0.size(); i++) { GridFuture f = activeFuts0.get(i); if (f == null) doneCnt++; else if (f.isDone()) { f.get(); doneCnt++; activeFuts0.set(i, null); } else break; } if (doneCnt == activeFuts0.size()) return; } }
/** Key partition. */ public class GridDhtLocalPartition implements Comparable<GridDhtLocalPartition>, GridReservable { /** Maximum size for delete queue. */ public static final int MAX_DELETE_QUEUE_SIZE = Integer.getInteger(IGNITE_ATOMIC_CACHE_DELETE_HISTORY_SIZE, 200_000); /** Static logger to avoid re-creation. */ private static final AtomicReference<IgniteLogger> logRef = new AtomicReference<>(); /** Logger. */ private static volatile IgniteLogger log; /** Partition ID. */ private final int id; /** State. */ @GridToStringExclude private final AtomicStampedReference<GridDhtPartitionState> state = new AtomicStampedReference<>(MOVING, 0); /** Rent future. */ @GridToStringExclude private final GridFutureAdapter<?> rent; /** Entries map. */ private final ConcurrentMap<KeyCacheObject, GridDhtCacheEntry> map; /** Context. */ private final GridCacheContext cctx; /** Create time. */ @GridToStringExclude private final long createTime = U.currentTimeMillis(); /** Eviction history. */ private volatile Map<KeyCacheObject, GridCacheVersion> evictHist = new HashMap<>(); /** Lock. */ private final ReentrantLock lock = new ReentrantLock(); /** Public size counter. */ private final LongAdder8 mapPubSize = new LongAdder8(); /** Remove queue. */ private final GridCircularBuffer<T2<KeyCacheObject, GridCacheVersion>> rmvQueue; /** Group reservations. */ private final CopyOnWriteArrayList<GridDhtPartitionsReservation> reservations = new CopyOnWriteArrayList<>(); /** * @param cctx Context. * @param id Partition ID. */ @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor") GridDhtLocalPartition(GridCacheContext cctx, int id) { assert cctx != null; this.id = id; this.cctx = cctx; log = U.logger(cctx.kernalContext(), logRef, this); rent = new GridFutureAdapter<Object>() { @Override public String toString() { return "PartitionRentFuture [part=" + GridDhtLocalPartition.this + ", map=" + map + ']'; } }; map = new ConcurrentHashMap8<>(cctx.config().getStartSize() / cctx.affinity().partitions()); int delQueueSize = CU.isSystemCache(cctx.name()) ? 100 : Math.max(MAX_DELETE_QUEUE_SIZE / cctx.affinity().partitions(), 20); rmvQueue = new GridCircularBuffer<>(U.ceilPow2(delQueueSize)); } /** * Adds group reservation to this partition. * * @param r Reservation. * @return {@code false} If such reservation already added. */ public boolean addReservation(GridDhtPartitionsReservation r) { assert state.getReference() != EVICTED : "we can reserve only active partitions"; assert state.getStamp() != 0 : "partition must be already reserved before adding group reservation"; return reservations.addIfAbsent(r); } /** @param r Reservation. */ public void removeReservation(GridDhtPartitionsReservation r) { if (!reservations.remove(r)) throw new IllegalStateException("Reservation was already removed."); } /** @return Partition ID. */ public int id() { return id; } /** @return Create time. */ long createTime() { return createTime; } /** @return Partition state. */ public GridDhtPartitionState state() { return state.getReference(); } /** @return Reservations. */ public int reservations() { return state.getStamp(); } /** @return Keys belonging to partition. */ public Set<KeyCacheObject> keySet() { return map.keySet(); } /** @return Entries belonging to partition. */ public Collection<GridDhtCacheEntry> entries() { return map.values(); } /** @return {@code True} if partition is empty. */ public boolean isEmpty() { return map.isEmpty(); } /** @return Number of entries in this partition (constant-time method). 
*/ public int size() { return map.size(); } /** Increments public size of the map. */ public void incrementPublicSize() { mapPubSize.increment(); } /** Decrements public size of the map. */ public void decrementPublicSize() { mapPubSize.decrement(); } /** @return Number of public (non-internal) entries in this partition. */ public int publicSize() { return mapPubSize.intValue(); } /** @return If partition is moving or owning or renting. */ public boolean valid() { GridDhtPartitionState state = state(); return state == MOVING || state == OWNING || state == RENTING; } /** @param entry Entry to add. */ void onAdded(GridDhtCacheEntry entry) { GridDhtPartitionState state = state(); if (state == EVICTED) throw new GridDhtInvalidPartitionException( id, "Adding entry to invalid partition [part=" + id + ']'); map.put(entry.key(), entry); if (!entry.isInternal()) mapPubSize.increment(); } /** @param entry Entry to remove. */ @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") void onRemoved(GridDhtCacheEntry entry) { assert entry.obsolete(); // Make sure to remove exactly this entry. synchronized (entry) { map.remove(entry.key(), entry); if (!entry.isInternal() && !entry.deleted()) mapPubSize.decrement(); } // Attempt to evict. tryEvict(true); } /** * @param key Removed key. * @param ver Removed version. * @throws IgniteCheckedException If failed. */ public void onDeferredDelete(KeyCacheObject key, GridCacheVersion ver) throws IgniteCheckedException { try { T2<KeyCacheObject, GridCacheVersion> evicted = rmvQueue.add(new T2<>(key, ver)); if (evicted != null) cctx.dht().removeVersionedEntry(evicted.get1(), evicted.get2()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IgniteInterruptedCheckedException(e); } } /** Locks partition. */ @SuppressWarnings({"LockAcquiredButNotSafelyReleased"}) public void lock() { lock.lock(); } /** Unlocks partition. */ public void unlock() { lock.unlock(); } /** * @param key Key. * @param ver Version. */ public void onEntryEvicted(KeyCacheObject key, GridCacheVersion ver) { assert key != null; assert ver != null; assert lock.isHeldByCurrentThread(); // Only one thread can enter this method at a time. if (state() != MOVING) return; Map<KeyCacheObject, GridCacheVersion> evictHist0 = evictHist; if (evictHist0 != null) { GridCacheVersion ver0 = evictHist0.get(key); if (ver0 == null || ver0.isLess(ver)) { GridCacheVersion ver1 = evictHist0.put(key, ver); assert ver1 == ver0; } } } /** * Cache preloader should call this method within partition lock. * * @param key Key. * @param ver Version. * @return {@code True} if preloading is permitted. */ public boolean preloadingPermitted(KeyCacheObject key, GridCacheVersion ver) { assert key != null; assert ver != null; assert lock.isHeldByCurrentThread(); // Only one thread can enter this method at a time. if (state() != MOVING) return false; Map<KeyCacheObject, GridCacheVersion> evictHist0 = evictHist; if (evictHist0 != null) { GridCacheVersion ver0 = evictHist0.get(key); // Permit preloading if version in history // is missing or less than passed in. return ver0 == null || ver0.isLess(ver); } return false; } /** * Reserves a partition so it won't be cleared. * * @return {@code True} if reserved. 
*/ @Override public boolean reserve() { while (true) { int reservations = state.getStamp(); GridDhtPartitionState s = state.getReference(); if (s == EVICTED) return false; if (state.compareAndSet(s, s, reservations, reservations + 1)) return true; } } /** Releases previously reserved partition. */ @Override public void release() { while (true) { int reservations = state.getStamp(); if (reservations == 0) return; GridDhtPartitionState s = state.getReference(); assert s != EVICTED; // Decrement reservations. if (state.compareAndSet(s, s, reservations, --reservations)) { tryEvict(true); break; } } } /** @return {@code True} if transitioned to OWNING state. */ boolean own() { while (true) { int reservations = state.getStamp(); GridDhtPartitionState s = state.getReference(); if (s == RENTING || s == EVICTED) return false; if (s == OWNING) return true; assert s == MOVING; if (state.compareAndSet(MOVING, OWNING, reservations, reservations)) { if (log.isDebugEnabled()) log.debug("Owned partition: " + this); // No need to keep history any more. evictHist = null; return true; } } } /** * @param updateSeq Update sequence. * @return Future to signal that this node is no longer an owner or backup. */ IgniteInternalFuture<?> rent(boolean updateSeq) { while (true) { int reservations = state.getStamp(); GridDhtPartitionState s = state.getReference(); if (s == RENTING || s == EVICTED) return rent; if (state.compareAndSet(s, RENTING, reservations, reservations)) { if (log.isDebugEnabled()) log.debug("Moved partition to RENTING state: " + this); // Evict asynchronously, as the 'rent' method may be called // from within write locks on local partition. tryEvictAsync(updateSeq); break; } } return rent; } /** * @param updateSeq Update sequence. * @return Future for evict attempt. */ IgniteInternalFuture<Boolean> tryEvictAsync(boolean updateSeq) { if (map.isEmpty() && !GridQueryProcessor.isEnabled(cctx.config()) && state.compareAndSet(RENTING, EVICTED, 0, 0)) { if (log.isDebugEnabled()) log.debug("Evicted partition: " + this); clearSwap(); if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id); cctx.dataStructures().onPartitionEvicted(id); rent.onDone(); ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq); clearDeferredDeletes(); return new GridFinishedFuture<>(true); } return cctx.closures() .callLocalSafe( new GPC<Boolean>() { @Override public Boolean call() { return tryEvict(true); } }, /*system pool*/ true); } /** @return {@code true} If there is a group reservation. */ private boolean groupReserved() { for (GridDhtPartitionsReservation reservation : reservations) { if (!reservation.invalidate()) return true; // Failed to invalidate reservation -> we are reserved. } return false; } /** * @param updateSeq Update sequence. * @return {@code True} if entry has been transitioned to state EVICTED. */ boolean tryEvict(boolean updateSeq) { if (state.getReference() != RENTING || state.getStamp() != 0 || groupReserved()) return false; // Attempt to evict partition entries from cache. clearAll(); if (map.isEmpty() && state.compareAndSet(RENTING, EVICTED, 0, 0)) { if (log.isDebugEnabled()) log.debug("Evicted partition: " + this); if (!GridQueryProcessor.isEnabled(cctx.config())) clearSwap(); if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(id); cctx.dataStructures().onPartitionEvicted(id); rent.onDone(); ((GridDhtPreloader) cctx.preloader()).onPartitionEvicted(this, updateSeq); clearDeferredDeletes(); return true; } return false; } /** Clears swap entries for evicted partition. 
*/ private void clearSwap() { assert state() == EVICTED; assert !GridQueryProcessor.isEnabled(cctx.config()) : "Indexing needs to have unswapped values."; try { GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> it = cctx.swap().iterator(id); boolean isLocStore = cctx.store().isLocal(); if (it != null) { // We can safely remove these values because no entries will be created for evicted // partition. while (it.hasNext()) { Map.Entry<byte[], GridCacheSwapEntry> entry = it.next(); byte[] keyBytes = entry.getKey(); KeyCacheObject key = cctx.toCacheKeyObject(keyBytes); cctx.swap().remove(key); if (isLocStore) cctx.store().remove(null, key.value(cctx.cacheObjectContext(), false)); } } } catch (IgniteCheckedException e) { U.error(log, "Failed to clear swap for evicted partition: " + this, e); } } /** */ void onUnlock() { tryEvict(true); } /** * @param topVer Topology version. * @return {@code True} if local node is primary for this partition. */ public boolean primary(AffinityTopologyVersion topVer) { return cctx.affinity().primary(cctx.localNode(), id, topVer); } /** Clears values for this partition. */ private void clearAll() { GridCacheVersion clearVer = cctx.versions().next(); boolean swap = cctx.isSwapOrOffheapEnabled(); boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED); Iterator<GridDhtCacheEntry> it = map.values().iterator(); GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null; if (swap && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values. Iterator<GridDhtCacheEntry> unswapIt = null; try { swapIt = cctx.swap().iterator(id); unswapIt = unswapIterator(swapIt); } catch (Exception e) { U.error(log, "Failed to clear swap for evicted partition: " + this, e); } if (unswapIt != null) it = F.concat(it, unswapIt); } try { while (it.hasNext()) { GridDhtCacheEntry cached = it.next(); try { if (cached.clearInternal(clearVer, swap)) { map.remove(cached.key(), cached); if (!cached.isInternal()) { mapPubSize.decrement(); if (rec) cctx.events() .addEvent( cached.partition(), cached.key(), cctx.localNodeId(), (IgniteUuid) null, null, EVT_CACHE_REBALANCE_OBJECT_UNLOADED, null, false, cached.rawGet(), cached.hasValue(), null, null, null); } } } catch (IgniteCheckedException e) { U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e); } } } finally { U.close(swapIt, log); } } /** * @param it Swap iterator. * @return Unswapping iterator over swapped entries. 
*/ private Iterator<GridDhtCacheEntry> unswapIterator( final GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> it) { if (it == null) return null; return new Iterator<GridDhtCacheEntry>() { /** */ GridDhtCacheEntry lastEntry; @Override public boolean hasNext() { return it.hasNext(); } @Override public GridDhtCacheEntry next() { Map.Entry<byte[], GridCacheSwapEntry> entry = it.next(); byte[] keyBytes = entry.getKey(); try { KeyCacheObject key = cctx.toCacheKeyObject(keyBytes); lastEntry = (GridDhtCacheEntry) cctx.cache().entryEx(key, false); lastEntry.unswap(true); return lastEntry; } catch (IgniteCheckedException e) { throw new CacheException(e); } } @Override public void remove() { map.remove(lastEntry.key(), lastEntry); } }; } /** */ private void clearDeferredDeletes() { rmvQueue.forEach( new CI1<T2<KeyCacheObject, GridCacheVersion>>() { @Override public void apply(T2<KeyCacheObject, GridCacheVersion> t) { cctx.dht().removeVersionedEntry(t.get1(), t.get2()); } }); } /** {@inheritDoc} */ @Override public int hashCode() { return id; } /** {@inheritDoc} */ @SuppressWarnings({"OverlyStrongTypeCast"}) @Override public boolean equals(Object obj) { return obj instanceof GridDhtLocalPartition && (obj == this || ((GridDhtLocalPartition) obj).id() == id); } /** {@inheritDoc} */ @Override public int compareTo(@NotNull GridDhtLocalPartition part) { if (part == null) return 1; return Integer.compare(id, part.id()); } /** {@inheritDoc} */ @Override public String toString() { return S.toString( GridDhtLocalPartition.class, this, "state", state(), "reservations", reservations(), "empty", map.isEmpty(), "createTime", U.format(createTime), "mapPubSize", mapPubSize); } }
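/*
 * Hypothetical caller of the GridReservable contract implemented above: reserve() pins the
 * partition so it cannot transition to EVICTED while its entries are being read, and release()
 * in the finally block allows eviction to proceed again. localPartition() and process() are
 * placeholders, not part of the source.
 */
GridDhtLocalPartition part = localPartition(0); // placeholder lookup

if (part.reserve()) {
    try {
        // Safe to iterate: the partition cannot be evicted while reserved.
        for (GridDhtCacheEntry e : part.entries())
            process(e); // placeholder per-entry work
    }
    finally {
        part.release(); // may trigger eviction if the partition is RENTING and unreserved
    }
}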
/** Adapter for cache metrics. */ public class GridCacheMetricsAdapter implements GridCacheMetrics, Externalizable { /** Create time. */ private long createTime = U.currentTimeMillis(); /** Last read time. */ private volatile long readTime = createTime; /** Last update time. */ private volatile long writeTime = createTime; /** Last commit time. */ private volatile long commitTime = createTime; /** Last rollback time. */ private volatile long rollbackTime = createTime; /** Number of reads. */ private volatile int reads; /** Number of writes. */ private volatile int writes; /** Number of hits. */ private volatile int hits; /** Number of misses. */ private volatile int misses; /** Number of transaction commits. */ private volatile int txCommits; /** Number of transaction rollbacks. */ private volatile int txRollbacks; /** DR send data node metrics. */ private GridCacheDrSenderMetricsAdapter drSndMetrics; /** DR receive data node metrics. */ private GridCacheDrReceiverMetricsAdapter drRcvMetrics; /** Cache metrics. */ @GridToStringExclude private transient GridCacheMetricsAdapter delegate; /** No-args constructor. */ public GridCacheMetricsAdapter() { delegate = null; } /** * @param isDrSndCache True if data center replication configured for sending on that node. * @param isDrRcvCache True if data center replication configured for receiving on that node. */ public GridCacheMetricsAdapter(boolean isDrSndCache, boolean isDrRcvCache) { this(); if (isDrSndCache) drSndMetrics = new GridCacheDrSenderMetricsAdapter(); if (isDrRcvCache) drRcvMetrics = new GridCacheDrReceiverMetricsAdapter(); } /** @param m Metrics to copy from. */ public GridCacheMetricsAdapter(GridCacheMetrics m) { createTime = m.createTime(); readTime = m.readTime(); writeTime = m.writeTime(); commitTime = m.commitTime(); rollbackTime = m.rollbackTime(); reads = m.reads(); writes = m.writes(); hits = m.hits(); misses = m.misses(); txCommits = m.txCommits(); txRollbacks = m.txRollbacks(); drSndMetrics = ((GridCacheMetricsAdapter) m).drSndMetrics; drRcvMetrics = ((GridCacheMetricsAdapter) m).drRcvMetrics; } /** @param delegate Metrics to delegate to. */ public void delegate(GridCacheMetricsAdapter delegate) { this.delegate = delegate; } /** {@inheritDoc} */ @Override public long createTime() { return createTime; } /** {@inheritDoc} */ @Override public long writeTime() { return writeTime; } /** {@inheritDoc} */ @Override public long readTime() { return readTime; } /** {@inheritDoc} */ @Override public long commitTime() { return commitTime; } /** {@inheritDoc} */ @Override public long rollbackTime() { return rollbackTime; } /** {@inheritDoc} */ @Override public int reads() { return reads; } /** {@inheritDoc} */ @Override public int writes() { return writes; } /** {@inheritDoc} */ @Override public int hits() { return hits; } /** {@inheritDoc} */ @Override public int misses() { return misses; } /** {@inheritDoc} */ @Override public int txCommits() { return txCommits; } /** {@inheritDoc} */ @Override public int txRollbacks() { return txRollbacks; } /** {@inheritDoc} */ @Override public GridDrSenderCacheMetrics drSendMetrics() { if (drSndMetrics == null) throw new IllegalStateException("Data center replication is not configured."); return drSndMetrics; } /** {@inheritDoc} */ @Override public GridDrReceiverCacheMetrics drReceiveMetrics() { if (drRcvMetrics == null) throw new IllegalStateException("Data center replication is not configured."); return drRcvMetrics; } /** * Cache read callback. * * @param isHit Hit or miss flag. 
*/ public void onRead(boolean isHit) { readTime = U.currentTimeMillis(); reads++; if (isHit) hits++; else misses++; if (delegate != null) delegate.onRead(isHit); } /** Cache write callback. */ public void onWrite() { writeTime = U.currentTimeMillis(); writes++; if (delegate != null) delegate.onWrite(); } /** Transaction commit callback. */ public void onTxCommit() { commitTime = U.currentTimeMillis(); txCommits++; if (delegate != null) delegate.onTxCommit(); } /** Transaction rollback callback. */ public void onTxRollback() { rollbackTime = U.currentTimeMillis(); txRollbacks++; if (delegate != null) delegate.onTxRollback(); } /** * Callback for received acknowledgement by sender hub. * * @param entriesCnt Number of entries in batch. */ public void onSenderCacheBatchAcknowledged(int entriesCnt) { drSndMetrics.onBatchAcked(entriesCnt); if (delegate != null) delegate.onSenderCacheBatchAcknowledged(entriesCnt); } /** * Callback for received batch error by sender hub. * * @param entriesCnt Number of entries in batch. */ public void onSenderCacheBatchFailed(int entriesCnt) { drSndMetrics.onBatchFailed(entriesCnt); if (delegate != null) delegate.onSenderCacheBatchFailed(entriesCnt); } /** * Callback for sent batch on sender cache side. * * @param entriesCnt Number of sent entries. */ public void onSenderCacheBatchSent(int entriesCnt) { drSndMetrics.onBatchSent(entriesCnt); if (delegate != null) delegate.onSenderCacheBatchSent(entriesCnt); } /** Callback for filtered entries on sender cache side. */ public void onSenderCacheEntryFiltered() { drSndMetrics.onEntryFiltered(); if (delegate != null) delegate.onSenderCacheEntryFiltered(); } /** * Callback for backup queue size changed. * * @param newSize New size of sender cache backup queue. */ public void onSenderCacheBackupQueueSizeChanged(int newSize) { drSndMetrics.onBackupQueueSizeChanged(newSize); if (delegate != null) delegate.onSenderCacheBackupQueueSizeChanged(newSize); } /** * Callback for replication pause state changed. * * @param pauseReason Pause reason or {@code null} if replication is not paused. */ public void onPauseStateChanged(@Nullable GridDrPauseReason pauseReason) { drSndMetrics.onPauseStateChanged(pauseReason); if (delegate != null) delegate.onPauseStateChanged(pauseReason); } /** * Callback for conflict resolver on receiver cache side. * * @param usedNew New conflict status flag. * @param usedOld Old conflict status flag. * @param usedMerge Merge conflict status flag. */ public void onReceiveCacheConflictResolved(boolean usedNew, boolean usedOld, boolean usedMerge) { drRcvMetrics.onReceiveCacheConflictResolved(usedNew, usedOld, usedMerge); if (delegate != null) delegate.onReceiveCacheConflictResolved(usedNew, usedOld, usedMerge); } /** * Callback for received entries from receiver hub. * * @param entriesCnt Number of received entries. */ public void onReceiveCacheEntriesReceived(int entriesCnt) { if (drRcvMetrics != null) drRcvMetrics.onReceiveCacheEntriesReceived(entriesCnt); if (delegate != null) delegate.onReceiveCacheEntriesReceived(entriesCnt); } /** * Create a copy of given metrics object. * * @param m Metrics to copy from. * @return Copy of given metrics. 
*/ @Nullable public static GridCacheMetricsAdapter copyOf(@Nullable GridCacheMetrics m) { if (m == null) return null; return new GridCacheMetricsAdapter(m); } /** {@inheritDoc} */ @Override public void writeExternal(ObjectOutput out) throws IOException { out.writeLong(createTime); out.writeLong(readTime); out.writeLong(writeTime); out.writeLong(commitTime); out.writeLong(rollbackTime); out.writeInt(reads); out.writeInt(writes); out.writeInt(hits); out.writeInt(misses); out.writeInt(txCommits); out.writeInt(txRollbacks); } /** {@inheritDoc} */ @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { createTime = in.readLong(); readTime = in.readLong(); writeTime = in.readLong(); commitTime = in.readLong(); rollbackTime = in.readLong(); reads = in.readInt(); writes = in.readInt(); hits = in.readInt(); misses = in.readInt(); txCommits = in.readInt(); txRollbacks = in.readInt(); } /** {@inheritDoc} */ @Override public String toString() { return S.toString(GridCacheMetricsAdapter.class, this); } }
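/*
 * Sketch of how the delegate chaining above can be used (the wiring is illustrative, not taken
 * from the source): a per-cache metrics object forwards every callback to a shared aggregate,
 * and copyOf() produces a detached snapshot for reporting.
 */
GridCacheMetricsAdapter aggregate = new GridCacheMetricsAdapter();
GridCacheMetricsAdapter cacheLocal = new GridCacheMetricsAdapter();

cacheLocal.delegate(aggregate);

cacheLocal.onRead(true);  // hit: updates cacheLocal and, via the delegate, aggregate
cacheLocal.onWrite();

// Detached copy for external reporting; copyOf(null) returns null.
GridCacheMetricsAdapter snapshot = GridCacheMetricsAdapter.copyOf(cacheLocal);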
/** Future adapter. */ public class GridFutureAdapter<R> extends AbstractQueuedSynchronizer implements IgniteInternalFuture<R> { /** */ private static final long serialVersionUID = 0L; /** Initial state. */ private static final int INIT = 0; /** Cancelled state. */ private static final int CANCELLED = 1; /** Done state. */ private static final int DONE = 2; /** */ private static final byte ERR = 1; /** */ private static final byte RES = 2; /** */ private byte resFlag; /** Result. */ @GridToStringInclude private Object res; /** Future start time. */ private final long startTime = U.currentTimeMillis(); /** Future end time. */ private volatile long endTime; /** */ private boolean ignoreInterrupts; /** */ private IgniteInClosure<? super IgniteInternalFuture<R>> lsnr; /** {@inheritDoc} */ @Override public long startTime() { return startTime; } /** {@inheritDoc} */ @Override public long duration() { long endTime = this.endTime; return endTime == 0 ? U.currentTimeMillis() - startTime : endTime - startTime; } /** @param ignoreInterrupts Ignore interrupts flag. */ public void ignoreInterrupts(boolean ignoreInterrupts) { this.ignoreInterrupts = ignoreInterrupts; } /** @return Future end time. */ public long endTime() { return endTime; } /** {@inheritDoc} */ @Override public Throwable error() { return (resFlag == ERR) ? (Throwable) res : null; } /** {@inheritDoc} */ @Override public R result() { return resFlag == RES ? (R) res : null; } /** {@inheritDoc} */ @Override public R get() throws IgniteCheckedException { try { if (endTime == 0) { if (ignoreInterrupts) acquireShared(0); else acquireSharedInterruptibly(0); } if (getState() == CANCELLED) throw new IgniteFutureCancelledCheckedException("Future was cancelled: " + this); assert resFlag != 0; if (resFlag == ERR) throw U.cast((Throwable) res); return (R) res; } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IgniteInterruptedCheckedException(e); } } /** {@inheritDoc} */ @Override public R get(long timeout) throws IgniteCheckedException { // Do not replace with static import, as it may not compile. return get(timeout, TimeUnit.MILLISECONDS); } /** {@inheritDoc} */ @Override public R get(long timeout, TimeUnit unit) throws IgniteCheckedException { A.ensure(timeout >= 0, "timeout cannot be negative: " + timeout); A.notNull(unit, "unit"); try { return get0(unit.toNanos(timeout)); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IgniteInterruptedCheckedException( "Got interrupted while waiting for future to complete.", e); } } /** * @param nanosTimeout Timeout (nanoseconds). * @return Result. * @throws InterruptedException If interrupted. * @throws IgniteFutureTimeoutCheckedException If timeout reached before computation completed. * @throws IgniteCheckedException If error occurred. */ @Nullable protected R get0(long nanosTimeout) throws InterruptedException, IgniteCheckedException { if (endTime == 0 && !tryAcquireSharedNanos(0, nanosTimeout)) throw new IgniteFutureTimeoutCheckedException( "Timeout was reached before computation completed."); if (getState() == CANCELLED) throw new IgniteFutureCancelledCheckedException("Future was cancelled: " + this); assert resFlag != 0; if (resFlag == ERR) throw U.cast((Throwable) res); return (R) res; } /** {@inheritDoc} */ @Override public void listen(IgniteInClosure<? super IgniteInternalFuture<R>> lsnr0) { assert lsnr0 != null; boolean done = isDone(); if (!done) { synchronized (this) { done = isDone(); // Double check. 
if (!done) { if (lsnr == null) lsnr = lsnr0; else if (lsnr instanceof ArrayListener) ((ArrayListener) lsnr).add(lsnr0); else { lsnr = (IgniteInClosure) new ArrayListener<IgniteInternalFuture>(lsnr, lsnr0); } return; } } } assert done; notifyListener(lsnr0); } /** {@inheritDoc} */ @Override public <T> IgniteInternalFuture<T> chain( final IgniteClosure<? super IgniteInternalFuture<R>, T> doneCb) { return new ChainFuture<>(this, doneCb); } /** Notifies all registered listeners. */ private void notifyListeners() { IgniteInClosure<? super IgniteInternalFuture<R>> lsnr0; synchronized (this) { lsnr0 = lsnr; if (lsnr0 == null) return; lsnr = null; } assert lsnr0 != null; notifyListener(lsnr0); } /** * Notifies single listener. * * @param lsnr Listener. */ private void notifyListener(IgniteInClosure<? super IgniteInternalFuture<R>> lsnr) { assert lsnr != null; try { lsnr.apply(this); } catch (IllegalStateException e) { U.error( null, "Failed to notify listener (is grid stopped?) [fut=" + this + ", lsnr=" + lsnr + ", err=" + e.getMessage() + ']', e); } catch (RuntimeException | Error e) { U.error(null, "Failed to notify listener: " + lsnr, e); throw e; } } /** * Default no-op implementation that always returns {@code false}. Futures that do support * cancellation should override this method and call {@link #onCancelled()} callback explicitly if * cancellation actually happened. */ @Override public boolean cancel() throws IgniteCheckedException { return false; } /** {@inheritDoc} */ @Override public boolean isDone() { // Don't check for "valid" here, as "done" flag can be read // even in invalid state. return endTime != 0; } /** @return {@code True} if future completed with an exception. */ public boolean isFailed() { // Must read endTime first. return endTime != 0 && resFlag == ERR; } /** {@inheritDoc} */ @Override public boolean isCancelled() { return getState() == CANCELLED; } /** * Callback to notify that future is finished with {@code null} result. This method must delegate * to {@link #onDone(Object, Throwable)} method. * * @return {@code True} if result was set by this call. */ public final boolean onDone() { return onDone(null, null); } /** * Callback to notify that future is finished. This method must delegate to {@link #onDone(Object, * Throwable)} method. * * @param res Result. * @return {@code True} if result was set by this call. */ public final boolean onDone(@Nullable R res) { return onDone(res, null); } /** * Callback to notify that future is finished. This method must delegate to {@link #onDone(Object, * Throwable)} method. * * @param err Error. * @return {@code True} if result was set by this call. */ public final boolean onDone(@Nullable Throwable err) { return onDone(null, err); } /** * Callback to notify that future is finished. Note that if a non-{@code null} exception is passed * in, the result value will be ignored. * * @param res Optional result. * @param err Optional error. * @return {@code True} if result was set by this call. */ public boolean onDone(@Nullable R res, @Nullable Throwable err) { return onDone(res, err, false); } /** * @param res Result. * @param err Error. * @param cancel {@code True} if future is being cancelled. * @return {@code True} if result was set by this call. */ private boolean onDone(@Nullable R res, @Nullable Throwable err, boolean cancel) { boolean notify = false; try { if (compareAndSetState(INIT, cancel ?
CANCELLED : DONE)) { if (err != null) { resFlag = ERR; this.res = err; } else { resFlag = RES; this.res = res; } notify = true; releaseShared(0); return true; } return false; } finally { if (notify) notifyListeners(); } } /** * Callback to notify that future is cancelled. * * @return {@code True} if cancel flag was set by this call. */ public boolean onCancelled() { return onDone(null, null, true); } /** {@inheritDoc} */ @Override protected final int tryAcquireShared(int ignore) { return endTime != 0 ? 1 : -1; } /** {@inheritDoc} */ @Override protected final boolean tryReleaseShared(int ignore) { endTime = U.currentTimeMillis(); // Always signal after setting final done status. return true; } /** @return String representation of state. */ private String state() { int s = getState(); return s == INIT ? "INIT" : s == CANCELLED ? "CANCELLED" : "DONE"; } /** {@inheritDoc} */ @Override public String toString() { return S.toString(GridFutureAdapter.class, this, "state", state()); } /** */ private static class ArrayListener<R> implements IgniteInClosure<IgniteInternalFuture<R>> { /** */ private static final long serialVersionUID = 0L; /** */ private IgniteInClosure<? super IgniteInternalFuture<R>>[] arr; /** @param lsnrs Listeners. */ private ArrayListener(IgniteInClosure... lsnrs) { this.arr = lsnrs; } /** {@inheritDoc} */ @Override public void apply(IgniteInternalFuture<R> fut) { for (int i = 0; i < arr.length; i++) arr[i].apply(fut); } /** @param lsnr Listener. */ void add(IgniteInClosure<? super IgniteInternalFuture<R>> lsnr) { arr = Arrays.copyOf(arr, arr.length + 1); arr[arr.length - 1] = lsnr; } /** {@inheritDoc} */ @Override public String toString() { return S.toString(ArrayListener.class, this, "arrSize", arr.length); } } /** */ private static class ChainFuture<R, T> extends GridFutureAdapter<T> { /** */ private static final long serialVersionUID = 0L; /** */ private GridFutureAdapter<R> fut; /** */ private IgniteClosure<? super IgniteInternalFuture<R>, T> doneCb; /** */ public ChainFuture() { // No-op. } /** * @param fut Future. * @param doneCb Closure. */ ChainFuture( GridFutureAdapter<R> fut, IgniteClosure<? super IgniteInternalFuture<R>, T> doneCb) { this.fut = fut; this.doneCb = doneCb; fut.listen(new GridFutureChainListener<>(this, doneCb)); } /** {@inheritDoc} */ @Override public String toString() { return "ChainFuture [orig=" + fut + ", doneCb=" + doneCb + ']'; } } }
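GridFutureAdapter builds a completable future directly on AbstractQueuedSynchronizer: callers of get() block in the shared-acquire path until the future reaches a final state, and completion publishes the result before releasing the shared lock so every waiter observes it. Below is a stripped-down, self-contained sketch of that pattern; SimpleFuture is a hypothetical name, and listeners, chaining and cancellation are deliberately omitted.

// Illustrative sketch (hypothetical SimpleFuture, not the Ignite class): the same
// AQS pattern -- waiters park in acquireSharedInterruptibly until onDone() publishes
// a result and releases the shared lock.
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;

public class SimpleFuture<R> extends AbstractQueuedSynchronizer {
    private static final int INIT = 0;
    private static final int DONE = 2;

    /** Result is written before releaseShared(0), so waiters read it safely. */
    private volatile R res;

    /** Completes the future; only the first call wins. */
    public boolean onDone(R res) {
        if (compareAndSetState(INIT, DONE)) {
            this.res = res;

            releaseShared(0); // Wake up all threads blocked in get().

            return true;
        }

        return false;
    }

    /** Blocks until the future is completed. */
    public R get() throws InterruptedException {
        acquireSharedInterruptibly(0);

        return res;
    }

    /** Blocks for at most the given timeout. */
    public R get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
        if (!tryAcquireSharedNanos(0, unit.toNanos(timeout)))
            throw new TimeoutException("Future was not completed in time.");

        return res;
    }

    /** Shared acquire succeeds only after completion. */
    @Override protected int tryAcquireShared(int ignore) {
        return getState() == DONE ? 1 : -1;
    }

    /** Always signal: the final state was set before release. */
    @Override protected boolean tryReleaseShared(int ignore) {
        return true;
    }

    public static void main(String[] args) throws Exception {
        SimpleFuture<String> fut = new SimpleFuture<>();

        new Thread(() -> {
            try {
                Thread.sleep(100);
            }
            catch (InterruptedException ignored) {
                // No-op.
            }

            fut.onDone("hello");
        }).start();

        System.out.println(fut.get(1, TimeUnit.SECONDS));
    }
}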
/** * Log message. * * @param log Logger. * @param msg Message. */ public static void log(@Nullable IgniteLogger log, String msg) { log0(log, U.currentTimeMillis(), " " + msg); }
/** {@inheritDoc} */ @Override public long duration() { long endTime = this.endTime; return endTime == 0 ? U.currentTimeMillis() - startTime : endTime - startTime; }
/** @throws Exception If failed. */ private void doTest() throws Exception { System.gc(); System.gc(); System.gc(); try { useCache = true; startGridsMultiThreaded(GRID_CNT); useCache = false; Ignite ignite = startGrid(); final IgniteDataStreamer<Integer, String> ldr = ignite.dataStreamer(null); ldr.perNodeBufferSize(8192); ldr.receiver(DataStreamerCacheUpdaters.<Integer, String>batchedSorted()); ldr.autoFlushFrequency(0); final LongAdder8 cnt = new LongAdder8(); long start = U.currentTimeMillis(); Thread t = new Thread( new Runnable() { @SuppressWarnings("BusyWait") @Override public void run() { while (true) { try { Thread.sleep(10000); } catch (InterruptedException ignored) { break; } info(">>> Adds/sec: " + cnt.sumThenReset() / 10); } } }); t.setDaemon(true); t.start(); int threadNum = 2; // Runtime.getRuntime().availableProcessors(); multithreaded( new Callable<Object>() { @SuppressWarnings("InfiniteLoopStatement") @Override public Object call() throws Exception { ThreadLocalRandom8 rnd = ThreadLocalRandom8.current(); while (true) { int i = rnd.nextInt(ENTRY_CNT); ldr.addData(i, vals[rnd.nextInt(vals.length)]); cnt.increment(); } } }, threadNum, "loader"); info("Closing loader..."); ldr.close(false); long duration = U.currentTimeMillis() - start; info("Finished performance test. Duration: " + duration + "ms."); } finally { stopAllGrids(); } }
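For reference, the same streaming workload can be expressed against the public IgniteDataStreamer API outside the test harness. The sketch below is a minimal illustration under two assumptions not taken from the test above: a single node started with default configuration, and a cache named "test".

// Illustrative sketch of the public data streamer API; assumes a locally started
// node with default configuration and a cache named "test".
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteDataStreamer;
import org.apache.ignite.Ignition;

public class StreamerExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            ignite.getOrCreateCache("test");

            // The streamer is AutoCloseable; close() flushes remaining buffered entries.
            try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("test")) {
                streamer.perNodeBufferSize(1024); // Entries buffered per node before a batch is sent.
                streamer.autoFlushFrequency(0);   // 0 disables periodic automatic flushing.

                for (int i = 0; i < 100_000; i++)
                    streamer.addData(i, "value-" + i);
            }

            System.out.println("Loaded entries: " + ignite.cache("test").size());
        }
    }
}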