/** * Adds owned versions to map. * * @param vers Map of owned versions. */ public void ownedVersions(Map<IgniteTxKey, GridCacheVersion> vers) { if (F.isEmpty(vers)) return; if (owned == null) owned = new GridLeanMap<>(vers.size()); owned.putAll(vers); }
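/*
 * Illustrative sketch (not part of the Ignite sources): the same lazy-initialize-then-merge
 * pattern as ownedVersions(), written against plain java.util types instead of GridLeanMap.
 * The OwnedVersions class and its field names are hypothetical and exist only for this example.
 */
import java.util.HashMap;
import java.util.Map;

class OwnedVersions<K, V> {
    /** Lazily created on first non-empty update to avoid allocating for instances that never own versions. */
    private Map<K, V> owned;

    void addAll(Map<K, V> vers) {
        if (vers == null || vers.isEmpty())
            return; // Nothing to merge; keep the field null.

        if (owned == null)
            owned = new HashMap<>(vers.size());

        owned.putAll(vers);
    }
}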
/**
 * @param ctx Kernal context.
 * @param cfg Ignite configuration.
 * @param providers Plugin providers.
 */
@SuppressWarnings("TypeMayBeWeakened")
public IgnitePluginProcessor(
    GridKernalContext ctx, IgniteConfiguration cfg, List<PluginProvider> providers) {
    super(ctx);

    ExtensionRegistryImpl registry = new ExtensionRegistryImpl();

    for (PluginProvider provider : providers) {
        GridPluginContext pluginCtx = new GridPluginContext(ctx, cfg);

        if (F.isEmpty(provider.name()))
            throw new IgniteException("Plugin name cannot be empty.");

        if (plugins.containsKey(provider.name()))
            throw new IgniteException("Duplicated plugin name: " + provider.name());

        plugins.put(provider.name(), provider);

        pluginCtxMap.put(provider, pluginCtx);

        provider.initExtensions(pluginCtx, registry);

        if (provider.plugin() == null)
            throw new IgniteException("Plugin is null.");
    }

    extensions = registry.createExtensionMap();
}
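/*
 * Minimal standalone sketch of the same provider-registration checks (empty name, duplicate name),
 * using only the JDK. The PluginRegistry and NamedProvider types are hypothetical stand-ins for
 * the Ignite plugin machinery, not part of the real API.
 */
import java.util.HashMap;
import java.util.Map;

interface NamedProvider {
    String name();
}

class PluginRegistry {
    private final Map<String, NamedProvider> plugins = new HashMap<>();

    void register(NamedProvider provider) {
        String name = provider.name();

        if (name == null || name.isEmpty())
            throw new IllegalArgumentException("Plugin name cannot be empty.");

        if (plugins.containsKey(name))
            throw new IllegalArgumentException("Duplicated plugin name: " + name);

        plugins.put(name, provider);
    }
}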
/**
 * Sends query request.
 *
 * @param fut Distributed future.
 * @param req Request.
 * @param nodes Nodes.
 * @throws IgniteCheckedException In case of error.
 */
@SuppressWarnings("unchecked")
private void sendRequest(
    final GridCacheDistributedQueryFuture<?, ?, ?> fut,
    final GridCacheQueryRequest req,
    Collection<ClusterNode> nodes)
    throws IgniteCheckedException {
    assert fut != null;
    assert req != null;
    assert nodes != null;

    final UUID locNodeId = cctx.localNodeId();

    ClusterNode locNode = null;

    Collection<ClusterNode> rmtNodes = null;

    for (ClusterNode n : nodes) {
        if (n.id().equals(locNodeId))
            locNode = n;
        else {
            if (rmtNodes == null)
                rmtNodes = new ArrayList<>(nodes.size());

            rmtNodes.add(n);
        }
    }

    // The request must be sent to remote nodes before the query is processed on the local node.
    // For example, a remote reducer may hold state; we must not serialize and send a reducer
    // that local execution has already modified.
    if (!F.isEmpty(rmtNodes)) {
        cctx.io()
            .safeSend(
                rmtNodes,
                req,
                cctx.ioPolicy(),
                new P1<ClusterNode>() {
                    @Override public boolean apply(ClusterNode node) {
                        fut.onNodeLeft(node.id());

                        return !fut.isDone();
                    }
                });
    }

    if (locNode != null) {
        cctx.closures()
            .callLocalSafe(
                new Callable<Object>() {
                    @Override public Object call() throws Exception {
                        req.beforeLocalExecution(cctx);

                        processQueryRequest(locNodeId, req);

                        return null;
                    }
                });
    }
}
/** * Resolve load balancer from string definition. * * @param balancer Load balancer string definition. * @return Resolved load balancer. * @throws GridClientException If loading failed. */ private static GridClientLoadBalancer resolveBalancer(String balancer) throws GridClientException { if (F.isEmpty(balancer) || "random".equals(balancer)) return new GridClientRandomBalancer(); if ("roundrobin".equals(balancer)) return new GridClientRoundRobinBalancer(); return newInstance(GridClientLoadBalancer.class, balancer); }
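/*
 * Sketch of the alias-or-class-name resolution pattern used by resolveBalancer() and
 * resolveAffinity(), with plain JDK reflection in place of the internal newInstance() helper.
 * The Balancer interface and implementation names here are hypothetical.
 */
interface Balancer { /* strategy marker */ }

class RandomBalancer implements Balancer { }

class RoundRobinBalancer implements Balancer { }

final class Balancers {
    static Balancer resolve(String def) throws ReflectiveOperationException {
        // An empty definition or the "random" alias falls back to the default strategy.
        if (def == null || def.isEmpty() || "random".equals(def))
            return new RandomBalancer();

        if ("roundrobin".equals(def))
            return new RoundRobinBalancer();

        // Anything else is treated as a fully qualified class name of a custom strategy.
        return (Balancer) Class.forName(def).getDeclaredConstructor().newInstance();
    }
}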
/** * Resolve data affinity from string definition. * * @param affinity Data affinity string definition. * @return Resolved data affinity. * @throws GridClientException If loading failed. */ private static GridClientDataAffinity resolveAffinity(String affinity) throws GridClientException { if (F.isEmpty(affinity)) return null; if ("partitioned".equals(affinity)) return new GridClientPartitionAffinity(); return newInstance(GridClientDataAffinity.class, affinity); }
/**
 * Creates node predicate that evaluates to {@code true} for all provided nodes. Implementation
 * will make a defensive copy.
 *
 * @param nodes Optional grid nodes. If none provided - predicate will always return {@code false}.
 */
public GridNodePredicate(@Nullable GridNode... nodes) {
    if (F.isEmpty(nodes))
        ids = Collections.emptySet();
    else if (nodes.length == 1)
        ids = Collections.singleton(nodes[0].id());
    else {
        ids = new HashSet<>(nodes.length);

        for (GridNode n : nodes)
            ids.add(n.id());
    }
}
/** * Creates node predicate that evaluates to {@code true} for all provided node IDs. Implementation * will make a defensive copy. * * @param ids Optional node IDs. If none provided - predicate will always return {@code false}. */ public GridNodePredicate(@Nullable UUID... ids) { if (F.isEmpty(ids)) this.ids = Collections.emptySet(); else if (ids.length == 1) this.ids = Collections.singleton(ids[0]); else { this.ids = new HashSet<>(ids.length); Collections.addAll(this.ids, ids); } }
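/*
 * Standalone sketch of the size-specialized defensive copy used by the GridNodePredicate
 * constructors: empty input maps to Collections.emptySet(), a single element to
 * Collections.singleton(), and anything larger to a HashSet copy. Plain JDK types only;
 * the IdSets helper is illustrative.
 */
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;

final class IdSets {
    static Set<UUID> copyOf(UUID... ids) {
        if (ids == null || ids.length == 0)
            return Collections.emptySet();        // Shared immutable instance, no allocation.

        if (ids.length == 1)
            return Collections.singleton(ids[0]); // Cheapest possible one-element set.

        Set<UUID> copy = new HashSet<>(ids.length);

        Collections.addAll(copy, ids);

        return copy;
    }
}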
/** * @param part Partition. * @param topVer Topology version. * @return Backup nodes. */ public Collection<GridNode> backups(int part, long topVer) { Collection<GridNode> nodes = nodes(part, topVer); assert !F.isEmpty(nodes); if (nodes.size() <= 1) return Collections.emptyList(); return F.view(nodes, F.notEqualTo(nodes.iterator().next())); }
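/*
 * JDK-only sketch of backups(): given an ordered assignment where the first node is the primary,
 * return the remaining nodes as the backups (empty when only the primary is assigned). This
 * assumes a List-based assignment; the Assignment helper is hypothetical.
 */
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class Assignment {
    static <N> List<N> backups(List<N> nodes) {
        if (nodes == null || nodes.isEmpty())
            throw new AssertionError("Affinity must return at least the primary node.");

        if (nodes.size() <= 1)
            return Collections.emptyList();

        // Everything after the primary (the first node) is a backup.
        return new ArrayList<>(nodes.subList(1, nodes.size()));
    }
}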
/** * @param keys Keys. * @param timeout Timeout. * @param tx Transaction. * @param filter Filter. * @return Future. */ public GridFuture<Boolean> lockAllAsync( Collection<? extends K> keys, long timeout, @Nullable GridCacheTxLocalEx<K, V> tx, GridPredicate<? super GridCacheEntry<K, V>>[] filter) { if (F.isEmpty(keys)) { return new GridFinishedFuture<Boolean>(ctx.kernalContext(), true); } GridLocalLockFuture<K, V> fut = new GridLocalLockFuture<K, V>(ctx, keys, tx, this, timeout, filter); try { for (K key : keys) { while (true) { GridLocalCacheEntry<K, V> entry = null; try { entry = entryExx(key); if (!ctx.isAll(entry, filter)) { fut.onFailed(); return fut; } // Removed exception may be thrown here. GridCacheMvccCandidate<K> cand = fut.addEntry(entry); if (cand == null && fut.isDone()) { return fut; } break; } catch (GridCacheEntryRemovedException ignored) { if (log().isDebugEnabled()) { log().debug("Got removed entry in lockAsync(..) method (will retry): " + entry); } } } } if (!ctx.mvcc().addFuture(fut)) fut.onError(new GridException("Duplicate future ID (internal error): " + fut)); // Must have future added prior to checking locks. fut.checkLocks(); return fut; } catch (GridException e) { fut.onError(e); return fut; } }
/** * Checks for explicit events configuration. * * @param ignite Grid instance. * @return {@code true} if all task events explicitly specified in configuration. */ public static boolean checkExplicitTaskMonitoring(Ignite ignite) { int[] evts = ignite.configuration().getIncludeEventTypes(); if (F.isEmpty(evts)) return false; for (int evt : VISOR_TASK_EVTS) { if (!F.contains(evts, evt)) return false; } return true; }
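/*
 * JDK-only sketch of the "all required event types are explicitly configured" check performed by
 * checkExplicitTaskMonitoring(). REQUIRED_EVTS stands in for VISOR_TASK_EVTS and its values are
 * placeholders.
 */
final class EventCheck {
    // Placeholder event type codes; the real set is VISOR_TASK_EVTS.
    private static final int[] REQUIRED_EVTS = {20, 21, 22};

    static boolean allConfigured(int[] configured) {
        if (configured == null || configured.length == 0)
            return false; // Nothing explicitly configured.

        for (int required : REQUIRED_EVTS) {
            boolean found = false;

            for (int evt : configured) {
                if (evt == required) {
                    found = true;

                    break;
                }
            }

            if (!found)
                return false;
        }

        return true;
    }
}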
/** {@inheritDoc} */ @Override public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) { if (F.isEmpty(lsnr)) synchronized (mux) { lsnrs.clear(); } else synchronized (mux) { lsnrs.removeAll(F.asList(lsnr)); } }
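/*
 * Minimal sketch of the clear-all-or-remove-some listener handling in stopListenAsync(), using a
 * plain List guarded by a monitor instead of the future's internal state. All names here are
 * illustrative.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;

class ListenerRegistry<R> {
    private final Object mux = new Object();

    private final List<Consumer<R>> lsnrs = new ArrayList<>();

    @SafeVarargs
    final void stopListen(Consumer<R>... lsnr) {
        if (lsnr == null || lsnr.length == 0) {
            synchronized (mux) {
                lsnrs.clear();                        // No arguments: drop every registered listener.
            }
        }
        else {
            synchronized (mux) {
                lsnrs.removeAll(Arrays.asList(lsnr)); // Otherwise remove only the given listeners.
            }
        }
    }
}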
/** * @param cacheId Cache ID. * @return {@code True} if local client has been added. */ public boolean isLocalClientAdded(int cacheId) { if (!F.isEmpty(reqs)) { for (DynamicCacheChangeRequest req : reqs) { if (req.start() && F.eq(req.initiatingNodeId(), cctx.localNodeId())) { if (CU.cacheId(req.cacheName()) == cacheId) return true; } } } return false; }
/** * @param cacheId Cache ID to check. * @param topVer Topology version. * @return {@code True} if cache was added during this exchange. */ public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) { if (!F.isEmpty(reqs)) { for (DynamicCacheChangeRequest req : reqs) { if (req.start() && !req.clientStartOnly()) { if (CU.cacheId(req.cacheName()) == cacheId) return true; } } } GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId); return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer); }
/**
 * Parses HTTP parameters into an appropriate format and returns a map of parsed values keyed by
 * a predefined list of names.
 *
 * @param req Request.
 * @return Map of parsed parameters.
 */
@SuppressWarnings({"unchecked"})
private Map<String, Object> parameters(ServletRequest req) {
    Map<String, String[]> params = req.getParameterMap();

    if (F.isEmpty(params))
        return Collections.emptyMap();

    Map<String, Object> map = U.newHashMap(params.size());

    for (Map.Entry<String, String[]> entry : params.entrySet())
        map.put(entry.getKey(), parameter(entry.getValue()));

    return map;
}
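/*
 * Sketch of flattening the servlet parameter map, assuming (hypothetically) that the internal
 * parameter() helper collapses a one-element String[] to its single value and keeps longer arrays
 * as lists. Only standard JDK types are used; the Params helper is illustrative.
 */
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

final class Params {
    static Map<String, Object> flatten(Map<String, String[]> params) {
        if (params == null || params.isEmpty())
            return Collections.emptyMap();

        Map<String, Object> map = new HashMap<>(params.size());

        for (Map.Entry<String, String[]> e : params.entrySet()) {
            String[] vals = e.getValue();

            // A single value stays a String; multiple values become a List<String>.
            map.put(e.getKey(), vals.length == 1 ? vals[0] : Arrays.asList(vals));
        }

        return map;
    }
}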
/**
 * Checks the {@link GridSystemProperties#GG_JETTY_PORT} system property and overrides the default
 * connector port if it is present; the connector host is likewise overridden from
 * {@code GG_JETTY_HOST}. Then initializes {@code port} with the resulting value.
 *
 * @param con Jetty connector.
 */
private void override(AbstractNetworkConnector con) {
    String host = System.getProperty(GG_JETTY_HOST);

    if (!F.isEmpty(host))
        con.setHost(host);

    int currPort = con.getPort();

    Integer overridePort = Integer.getInteger(GG_JETTY_PORT);

    if (overridePort != null && overridePort != 0)
        currPort = overridePort;

    con.setPort(currPort);

    port = currPort;
}
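/*
 * Runnable JDK-only sketch of the port override logic: a property-supplied port wins over the
 * configured default, while absent or zero values are ignored. The property name used below is a
 * placeholder for GG_JETTY_PORT.
 */
final class PortOverride {
    static int resolvePort(int dfltPort, String portProp) {
        // Integer.getInteger() returns null when the system property is missing or not a number.
        Integer overridePort = Integer.getInteger(portProp);

        return (overridePort != null && overridePort != 0) ? overridePort : dfltPort;
    }

    public static void main(String[] args) {
        System.setProperty("EXAMPLE_JETTY_PORT", "8081");

        // Prints 8081: the property overrides the configured default of 8080.
        System.out.println(resolvePort(8080, "EXAMPLE_JETTY_PORT"));
    }
}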
/** {@inheritDoc} */ @Override public GridFuture<Map<K, V>> getAllAsync( @Nullable Collection<? extends K> keys, @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) { ctx.denyOnFlag(LOCAL); if (F.isEmpty(keys)) return new GridFinishedFuture<Map<K, V>>(ctx.kernalContext(), Collections.<K, V>emptyMap()); GridCacheTxLocalAdapter<K, V> tx = ctx.tm().threadLocalTx(); if (tx != null && !tx.implicit()) return ctx.wrapCloneMap(tx.getAllAsync(keys, filter)); return loadAsync(keys, false, filter); }
/** @param mappings Mappings. */ void addEntryMapping(@Nullable Map<UUID, GridDistributedTxMapping<K, V>> mappings) { if (!F.isEmpty(mappings)) { this.mappings.putAll(mappings); if (log.isDebugEnabled()) log.debug( "Added mappings to transaction [locId=" + cctx.nodeId() + ", mappings=" + mappings + ", tx=" + this + ']'); } }
/** * @param cacheId Cache ID to check. * @return {@code True} if cache is stopping by this exchange. */ private boolean stopping(int cacheId) { boolean stopping = false; if (!F.isEmpty(reqs)) { for (DynamicCacheChangeRequest req : reqs) { if (cacheId == CU.cacheId(req.cacheName())) { stopping = req.stop(); break; } } } return stopping; }
/** * @param rmtReducer Optional reducer. * @param rmtTransform Optional transformer. * @param args Arguments. * @return Future. */ @SuppressWarnings("IfMayBeConditional") private <R> GridCacheQueryFuture<R> execute( @Nullable GridReducer<T, R> rmtReducer, @Nullable GridClosure<T, R> rmtTransform, @Nullable Object... args) { Collection<GridNode> nodes = nodes(); cctx.checkSecurity(GridSecurityPermission.CACHE_READ); if (F.isEmpty(nodes)) return new GridCacheQueryErrorFuture<>( cctx.kernalContext(), new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.namexx())); if (log.isDebugEnabled()) log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']'); if (cctx.deploymentEnabled()) { try { cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform); cctx.deploy().registerClasses(args); } catch (GridException e) { return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e); } } if (subjId == null) subjId = cctx.localNodeId(); taskHash = cctx.kernalContext().job().currentTaskNameHash(); GridCacheQueryBean bean = new GridCacheQueryBean( this, (GridReducer<Object, Object>) rmtReducer, (GridClosure<Object, Object>) rmtTransform, args); GridCacheQueryManager qryMgr = cctx.queries(); boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId()); if (type == SQL_FIELDS) return (GridCacheQueryFuture<R>) (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes)); else return (GridCacheQueryFuture<R>) (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes)); }
/** {@inheritDoc} */ @Override public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) { Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer); lock.readLock().lock(); try { assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer1=" + topVer + ", topVer2=" + this.topVer + ", cache=" + cctx.name() + ", node2part=" + node2part + ']'; Collection<ClusterNode> nodes = null; Collection<UUID> nodeIds = part2node.get(p); if (!F.isEmpty(nodeIds)) { Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id())); for (UUID nodeId : nodeIds) { if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) { ClusterNode n = cctx.discovery().node(nodeId); if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) { if (nodes == null) { nodes = new ArrayList<>(affNodes.size() + 2); nodes.addAll(affNodes); } nodes.add(n); } } } } return nodes != null ? nodes : affNodes; } finally { lock.readLock().unlock(); } }
/** * @param ldr Loader. * @param nodeId Sender node ID. * @param req Request. * @return Remote transaction. * @throws GridException If failed. */ @Nullable public GridNearTxRemote<K, V> startRemoteTx( ClassLoader ldr, UUID nodeId, GridDhtTxPrepareRequest<K, V> req) throws GridException { if (!F.isEmpty(req.nearWrites())) { GridNearTxRemote<K, V> tx = new GridNearTxRemote<K, V>( ldr, nodeId, req.nearNodeId(), req.threadId(), req.version(), req.commitVersion(), req.concurrency(), req.isolation(), req.isInvalidate(), req.timeout(), req.nearWrites(), ctx); if (!tx.empty()) { tx = ctx.tm().onCreated(tx); if (tx == null || !ctx.tm().onStarted(tx)) throw new GridCacheTxRollbackException("Attempt to start a completed transaction: " + tx); // Prepare prior to reordering, so the pending locks added // in prepare phase will get properly ordered as well. tx.prepare(); // Add remote candidates and reorder completed and uncompleted versions. tx.addRemoteCandidates( req.candidatesByKey(), req.committedVersions(), req.rolledbackVersions()); if (req.concurrency() == EVENTUALLY_CONSISTENT) { if (log.isDebugEnabled()) log.debug("Committing transaction during remote prepare: " + tx); tx.commit(); if (log.isDebugEnabled()) log.debug("Committed transaction during remote prepare: " + tx); } } return tx; } return null; }
/** * @param keys Keys to load. * @param reload Reload flag. * @param filter Filter. * @return Loaded values. */ public GridFuture<Map<K, V>> loadAsync( @Nullable Collection<? extends K> keys, boolean reload, @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) { if (F.isEmpty(keys)) return new GridFinishedFuture<Map<K, V>>(ctx.kernalContext(), Collections.<K, V>emptyMap()); GridNearGetFuture<K, V> fut = new GridNearGetFuture<K, V>(ctx, keys, reload, null, filter); // Register future for responses. ctx.mvcc().addFuture(fut); fut.init(); return ctx.wrapCloneMap(fut); }
/** {@inheritDoc} */ @Override public void start() throws IgniteCheckedException { IpcSharedMemoryNativeLoader.load(log); pid = IpcSharedMemoryUtils.pid(); if (pid == -1) throw new IpcEndpointBindException("Failed to get PID of the current process."); if (size <= 0) throw new IpcEndpointBindException("Space size should be positive: " + size); String tokDirPath = this.tokDirPath; if (F.isEmpty(tokDirPath)) throw new IpcEndpointBindException("Token directory path is empty."); tokDirPath = tokDirPath + '/' + locNodeId.toString() + '-' + IpcSharedMemoryUtils.pid(); tokDir = U.resolveWorkDirectory(tokDirPath, false); if (port <= 0 || port >= 0xffff) throw new IpcEndpointBindException("Port value is illegal: " + port); try { srvSock = new ServerSocket(); // Always bind to loopback. srvSock.bind(new InetSocketAddress("127.0.0.1", port)); } catch (IOException e) { // Although empty socket constructor never throws exception, close it just in case. U.closeQuiet(srvSock); throw new IpcEndpointBindException( "Failed to bind shared memory IPC endpoint (is port already " + "in use?): " + port, e); } gcWorker = new GcWorker(gridName, "ipc-shmem-gc", log); new IgniteThread(gcWorker).start(); if (log.isInfoEnabled()) log.info( "IPC shared memory server endpoint started [port=" + port + ", tokDir=" + tokDir.getAbsolutePath() + ']'); }
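/*
 * Standalone sketch of the endpoint-binding steps from start(): validate the port range, bind a
 * ServerSocket explicitly to the loopback address, and close the socket if binding fails. Standard
 * JDK networking only; exception types differ from the Ignite-specific ones.
 */
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

final class LoopbackEndpoint {
    static ServerSocket bind(int port) throws IOException {
        if (port <= 0 || port >= 0xffff)
            throw new IllegalArgumentException("Port value is illegal: " + port);

        ServerSocket srvSock = new ServerSocket();

        try {
            // Always bind to loopback so only local processes can connect.
            srvSock.bind(new InetSocketAddress("127.0.0.1", port));

            return srvSock;
        }
        catch (IOException e) {
            // Close the unbound socket just in case before propagating the failure.
            srvSock.close();

            throw new IOException("Failed to bind IPC endpoint (is port already in use?): " + port, e);
        }
    }
}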
/** {@inheritDoc} */ @Override public void prepareMarshal(GridCacheContext<K, V> ctx) throws GridException { super.prepareMarshal(ctx); if (err != null) errBytes = ctx.marshaller().marshal(err); metaDataBytes = marshalCollection(metadata, ctx); dataBytes = fields ? marshalFieldsCollection(data, ctx) : marshalCollection(data, ctx); if (ctx.deploymentEnabled() && !F.isEmpty(data)) { for (Object o : data) { if (o instanceof Map.Entry) { Map.Entry e = (Map.Entry) o; prepareObject(e.getKey(), ctx); prepareObject(e.getValue(), ctx); } } } }
/** {@inheritDoc} */ @Override public boolean afterExchange(GridDhtPartitionsExchangeFuture exchFut) throws IgniteCheckedException { boolean changed = waitForRent(); ClusterNode loc = cctx.localNode(); int num = cctx.affinity().partitions(); AffinityTopologyVersion topVer = exchFut.topologyVersion(); lock.writeLock().lock(); try { if (stopping) return false; assert topVer.equals(exchFut.topologyVersion()) : "Invalid topology version [topVer=" + topVer + ", exchId=" + exchFut.exchangeId() + ']'; if (log.isDebugEnabled()) log.debug( "Partition map before afterExchange [exchId=" + exchFut.exchangeId() + ", fullMap=" + fullMapString() + ']'); long updateSeq = this.updateSeq.incrementAndGet(); for (int p = 0; p < num; p++) { GridDhtLocalPartition locPart = localPartition(p, topVer, false, false); if (cctx.affinity().localNode(p, topVer)) { // This partition will be created during next topology event, // which obviously has not happened at this point. if (locPart == null) { if (log.isDebugEnabled()) log.debug("Skipping local partition afterExchange (will not create): " + p); continue; } GridDhtPartitionState state = locPart.state(); if (state == MOVING) { if (cctx.rebalanceEnabled()) { Collection<ClusterNode> owners = owners(p); // If there are no other owners, then become an owner. if (F.isEmpty(owners)) { boolean owned = locPart.own(); assert owned : "Failed to own partition [cacheName" + cctx.name() + ", locPart=" + locPart + ']'; updateLocal(p, loc.id(), locPart.state(), updateSeq); changed = true; if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_PART_DATA_LOST)) { DiscoveryEvent discoEvt = exchFut.discoveryEvent(); cctx.events() .addPreloadEvent( p, EVT_CACHE_REBALANCE_PART_DATA_LOST, discoEvt.eventNode(), discoEvt.type(), discoEvt.timestamp()); } if (log.isDebugEnabled()) log.debug("Owned partition: " + locPart); } else if (log.isDebugEnabled()) log.debug( "Will not own partition (there are owners to rebalance from) [locPart=" + locPart + ", owners = " + owners + ']'); } else updateLocal(p, loc.id(), locPart.state(), updateSeq); } } else { if (locPart != null) { GridDhtPartitionState state = locPart.state(); if (state == MOVING) { locPart.rent(false); updateLocal(p, loc.id(), locPart.state(), updateSeq); changed = true; if (log.isDebugEnabled()) log.debug("Evicting moving partition (it does not belong to affinity): " + locPart); } } } } consistencyCheck(); } finally { lock.writeLock().unlock(); } return changed; }
/** * Starts activity. * * @throws IgniteInterruptedCheckedException If interrupted. */ public void init() throws IgniteInterruptedCheckedException { if (isDone()) return; if (init.compareAndSet(false, true)) { if (isDone()) return; try { // Wait for event to occur to make sure that discovery // will return corresponding nodes. U.await(evtLatch); assert discoEvt != null : this; assert !dummy && !forcePreload : this; ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion()); oldestNode.set(oldest); startCaches(); // True if client node joined or failed. boolean clientNodeEvt; if (F.isEmpty(reqs)) { int type = discoEvt.type(); assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt; clientNodeEvt = CU.clientNode(discoEvt.eventNode()); } else { assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt; boolean clientOnlyStart = true; for (DynamicCacheChangeRequest req : reqs) { if (!req.clientStartOnly()) { clientOnlyStart = false; break; } } clientNodeEvt = clientOnlyStart; } if (clientNodeEvt) { ClusterNode node = discoEvt.eventNode(); // Client need to initialize affinity for local join event or for stated client caches. if (!node.isLocal()) { for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) { initTopology(cacheCtx); top.beforeExchange(this); } else cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion()); } if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); onDone(exchId.topologyVersion()); skipPreload = cctx.kernalContext().clientNode(); return; } } if (cctx.kernalContext().clientNode()) { skipPreload = true; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; initTopology(cacheCtx); } if (oldestNode.get() != null) { rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); sendPartitions(); } else onDone(exchId.topologyVersion()); return; } assert oldestNode.get() != null; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) { if (cacheCtx .discovery() .cacheAffinityNodes(cacheCtx.name(), topologyVersion()) .isEmpty()) U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex()); } cacheCtx.preloader().onExchangeFutureAdded(); } List<String> cachesWithoutNodes = null; if (exchId.isLeft()) { for (String name : cctx.cache().cacheNames()) { if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) { if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>(); cachesWithoutNodes.add(name); // Fire event even if there is no client cache started. 
if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) { Event evt = new CacheEvent( name, cctx.localNode(), cctx.localNode(), "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false, null, null, null, null, false, null, false, null, null, null); cctx.gridEvents().record(evt); } } } } if (cachesWithoutNodes != null) { StringBuilder sb = new StringBuilder( "All server nodes for the following caches have left the cluster: "); for (int i = 0; i < cachesWithoutNodes.size(); i++) { String cache = cachesWithoutNodes.get(i); sb.append('\'').append(cache).append('\''); if (i != cachesWithoutNodes.size() - 1) sb.append(", "); } U.quietAndWarn(log, sb.toString()); U.quietAndWarn(log, "Must have server nodes for caches to operate."); } assert discoEvt != null; assert exchId.nodeId().equals(discoEvt.eventNode().id()); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId()); long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence(); // Update before waiting for locks. if (!cacheCtx.isLocal()) cacheCtx .topology() .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId())); } // Grab all alive remote nodes with order of equal or less than last joined node. rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); AffinityTopologyVersion topVer = exchId.topologyVersion(); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Must initialize topology after we get discovery event. initTopology(cacheCtx); cacheCtx.preloader().updateLastExchangeFuture(this); } IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer); // Assign to class variable so it will be included into toString() method. this.partReleaseFut = partReleaseFut; if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this); while (true) { try { partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { // Print pending transactions and locks that might have led to hang. dumpPendingObjects(); } } if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this); if (!F.isEmpty(reqs)) blockGateways(); if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion()); while (true) { try { locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { U.warn( log, "Failed to wait for locks release future. 
" + "Dumping pending objects that might be the cause: " + cctx.localNodeId()); U.warn(log, "Locked entries:"); Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks = cctx.mvcc().unfinishedLocks(exchId.topologyVersion()); for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet()) U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']'); } } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Notify replication manager. GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx; if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft()); // Partition release future is done so we can flush the write-behind store. cacheCtx.store().forceFlush(); // Process queued undeploys prior to sending/spreading map. cacheCtx.preloader().unwindUndeploys(); GridDhtPartitionTopology top = cacheCtx.topology(); assert topVer.equals(top.topologyVersion()) : "Topology version is updated only in this class instances inside single ExchangeWorker thread."; top.beforeExchange(this); } for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) { top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId())); top.beforeExchange(this); } } catch (IgniteInterruptedCheckedException e) { onDone(e); throw e; } catch (Throwable e) { U.error( log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e); onDone(e); if (e instanceof Error) throw (Error) e; return; } if (F.isEmpty(rmtIds)) { onDone(exchId.topologyVersion()); return; } ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); // If this node is not oldest. if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions(); else { boolean allReceived = allReceived(); if (allReceived && replied.compareAndSet(false, true)) { if (spreadPartitions()) onDone(exchId.topologyVersion()); } } scheduleRecheck(); } else assert false : "Skipped init future: " + this; }
/** * Creates node predicate that evaluates to {@code true} for all provided node IDs. Implementation * will make a defensive copy. * * @param ids Optional node IDs. If none provided - predicate will always return {@code false}. */ public GridNodePredicate(@Nullable Collection<UUID> ids) { this.ids = F.isEmpty(ids) ? Collections.<UUID>emptySet() : ids.size() == 1 ? Collections.singleton(F.first(ids)) : new HashSet<>(ids); }
/** * @param nodeId Sender node ID. * @param req Finish transaction message. */ @SuppressWarnings({"CatchGenericClass"}) private void processFinishRequest(UUID nodeId, GridDistributedTxFinishRequest<K, V> req) { assert nodeId != null; assert req != null; GridReplicatedTxRemote<K, V> tx = ctx.tm().tx(req.version()); try { ClassLoader ldr = ctx.deploy().globalLoader(); if (req.commit()) { // If lock was acquired explicitly. if (tx == null) { // Create transaction and add entries. tx = ctx.tm() .onCreated( new GridReplicatedTxRemote<K, V>( ldr, nodeId, req.threadId(), req.version(), req.commitVersion(), PESSIMISTIC, READ_COMMITTED, req.isInvalidate(), /*timeout */ 0, /*read entries*/ null, req.writes(), ctx)); if (tx == null || !ctx.tm().onStarted(tx)) throw new GridCacheTxRollbackException( "Attempt to start a completed " + "transaction: " + req); } else { boolean set = tx.commitVersion(req.commitVersion()); assert set; } Collection<GridCacheTxEntry<K, V>> writeEntries = req.writes(); if (!F.isEmpty(writeEntries)) { // In OPTIMISTIC mode, we get the values at PREPARE stage. assert tx.concurrency() == PESSIMISTIC; for (GridCacheTxEntry<K, V> entry : writeEntries) { // Unmarshal write entries. entry.unmarshal(ctx, ldr); if (log.isDebugEnabled()) log.debug( "Unmarshalled transaction entry from pessimistic transaction [key=" + entry.key() + ", value=" + entry.value() + ", tx=" + tx + ']'); if (!tx.setWriteValue(entry)) U.warn( log, "Received entry to commit that was not present in transaction [entry=" + entry + ", tx=" + tx + ']'); } } // Add completed versions. tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions()); if (tx.pessimistic()) tx.prepare(); tx.commit(); } else if (tx != null) { tx.doneRemote(req.baseVersion(), req.committedVersions(), req.rolledbackVersions()); tx.rollback(); } if (req.replyRequired()) { GridCacheMessage<K, V> res = new GridDistributedTxFinishResponse<K, V>(req.version(), req.futureId()); try { ctx.io().send(nodeId, res); } catch (Throwable e) { // Double-check. if (ctx.discovery().node(nodeId) == null) { if (log.isDebugEnabled()) log.debug( "Node left while sending finish response [nodeId=" + nodeId + ", res=" + res + ']'); } else U.error( log, "Failed to send finish response to node [nodeId=" + nodeId + ", res=" + res + ']', e); } } } catch (GridCacheTxRollbackException e) { if (log.isDebugEnabled()) log.debug("Attempted to start a completed transaction (will ignore): " + e); } catch (Throwable e) { U.error( log, "Failed completing transaction [commit=" + req.commit() + ", tx=" + CU.txString(tx) + ']', e); if (tx != null) tx.rollback(); } }
/** * @param nodeId Reader to add. * @param msgId Message ID. * @return Future for all relevant transactions that were active at the time of adding reader, or * {@code null} if reader was added * @throws GridCacheEntryRemovedException If entry was removed. */ @Nullable public GridFuture<Boolean> addReader(UUID nodeId, long msgId) throws GridCacheEntryRemovedException { // Don't add local node as reader. if (cctx.nodeId().equals(nodeId)) return null; GridNode node = cctx.discovery().node(nodeId); // If remote node has no near cache, don't add it. if (node == null || !U.hasNearCache(node, cctx.dht().near().name())) return null; // If remote node is (primary?) or back up, don't add it as a reader. if (U.nodeIds(cctx.affinity(partition(), CU.allNodes(cctx))).contains(nodeId)) return null; boolean ret = false; GridCacheMultiTxFuture<K, V> txFut; Collection<GridCacheMvccCandidate<K>> cands = null; synchronized (mux) { checkObsolete(); txFut = this.txFut; ReaderId reader = readerId(nodeId); if (reader == null) { reader = new ReaderId(nodeId, msgId); readers = new LinkedList<ReaderId>(readers); readers.add(reader); // Seal. readers = Collections.unmodifiableList(readers); txFut = this.txFut = new GridCacheMultiTxFuture<K, V>(cctx); cands = localCandidates(); ret = true; } else { long id = reader.messageId(); if (id < msgId) reader.messageId(msgId); } } if (ret) { assert txFut != null; if (!F.isEmpty(cands)) { for (GridCacheMvccCandidate<K> c : cands) { GridCacheTxEx<K, V> tx = cctx.tm().<GridCacheTxEx<K, V>>tx(c.version()); if (tx != null) { assert tx.local(); txFut.addTx(tx); } } } txFut.init(); if (!txFut.isDone()) { txFut.listenAsync( new CI1<GridFuture<?>>() { @Override public void apply(GridFuture<?> f) { synchronized (mux) { // Release memory. GridDhtCacheEntry.this.txFut = null; } } }); } else // Release memory. txFut = this.txFut = null; } return txFut; }
/** * Initializes store. * * @throws GridException If failed to initialize. */ private void init() throws GridException { if (initGuard.compareAndSet(false, true)) { if (log.isDebugEnabled()) log.debug("Initializing cache store."); try { if (sesFactory != null) // Session factory has been provided - nothing to do. return; if (!F.isEmpty(hibernateCfgPath)) { try { URL url = new URL(hibernateCfgPath); sesFactory = new Configuration().configure(url).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using URL: " + url); // Session factory has been successfully initialized. return; } catch (MalformedURLException e) { if (log.isDebugEnabled()) log.debug("Caught malformed URL exception: " + e.getMessage()); } // Provided path is not a valid URL. File? File cfgFile = new File(hibernateCfgPath); if (cfgFile.exists()) { sesFactory = new Configuration().configure(cfgFile).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using file: " + hibernateCfgPath); // Session factory has been successfully initialized. return; } // Provided path is not a file. Classpath resource? sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using classpath resource: " + hibernateCfgPath); } else { if (hibernateProps == null) { U.warn( log, "No Hibernate configuration has been provided for store (will use default)."); hibernateProps = new Properties(); hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL); hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL); hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO); } Configuration cfg = new Configuration(); cfg.setProperties(hibernateProps); assert resourceAvailable(MAPPING_RESOURCE); cfg.addResource(MAPPING_RESOURCE); sesFactory = cfg.buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using properties: " + hibernateProps); } } catch (HibernateException e) { throw new GridException("Failed to initialize store.", e); } finally { initLatch.countDown(); } } else if (initLatch.getCount() > 0) U.await(initLatch); if (sesFactory == null) throw new GridException("Cache store was not properly initialized."); }
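/*
 * Sketch of the URL -> file -> classpath resolution order used by init() when building the
 * Hibernate SessionFactory from a single path string. It assumes the classic
 * org.hibernate.cfg.Configuration API (configure(URL) / configure(File) / configure(String)) that
 * the original method itself calls; the HibernateCfgResolver wrapper is illustrative.
 */
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;

import org.hibernate.SessionFactory;
import org.hibernate.cfg.Configuration;

final class HibernateCfgResolver {
    static SessionFactory build(String path) {
        // 1. Try the path as a URL.
        try {
            URL url = new URL(path);

            return new Configuration().configure(url).buildSessionFactory();
        }
        catch (MalformedURLException ignored) {
            // Not a URL; fall through.
        }

        // 2. Try the path as a file on disk.
        File cfgFile = new File(path);

        if (cfgFile.exists())
            return new Configuration().configure(cfgFile).buildSessionFactory();

        // 3. Finally treat the path as a classpath resource.
        return new Configuration().configure(path).buildSessionFactory();
    }
}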