/** @return Nodes to execute on. */
private Collection<ClusterNode> nodes() {
    CacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null || partition() != null)
                return nodes(cctx, prj, partition());

            return cctx.affinityNode() ?
                Collections.singletonList(cctx.localNode()) :
                Collections.singletonList(F.rand(nodes(cctx, null, partition())));

        case PARTITIONED:
            return nodes(cctx, prj, partition());

        default:
            throw new IllegalStateException("Unknown cache distribution mode: " + cacheMode);
    }
}
/**
 * Tests offset and limit clauses for query.
 *
 * @throws Exception If failed.
 */
public void testOffsetLimit() throws Exception {
    IgniteCache<Integer, Integer> c =
        ignite(0).getOrCreateCache(cacheConfig("ints", true, Integer.class, Integer.class));

    try {
        List<Integer> res = new ArrayList<>();

        Random rnd = new GridRandom();

        for (int i = 0; i < 10; i++) {
            int val = rnd.nextInt(100);

            c.put(i, val);
            res.add(val);
        }

        Collections.sort(res);

        String qry = "select _val from Integer order by _val ";

        assertEqualsCollections(res, columnQuery(c, qry));
        assertEqualsCollections(res.subList(0, 0), columnQuery(c, qry + "limit ?", 0));
        assertEqualsCollections(res.subList(0, 3), columnQuery(c, qry + "limit ?", 3));
        assertEqualsCollections(res.subList(0, 9), columnQuery(c, qry + "limit ? offset ?", 9, 0));
        assertEqualsCollections(res.subList(3, 7), columnQuery(c, qry + "limit ? offset ?", 4, 3));
        assertEqualsCollections(res.subList(7, 9), columnQuery(c, qry + "limit ? offset ?", 2, 7));
        assertEqualsCollections(res.subList(8, 10), columnQuery(c, qry + "limit ? offset ?", 2, 8));
        assertEqualsCollections(res.subList(9, 10), columnQuery(c, qry + "limit ? offset ?", 1, 9));
        assertEqualsCollections(res.subList(10, 10), columnQuery(c, qry + "limit ? offset ?", 1, 10));
        assertEqualsCollections(res.subList(9, 10), columnQuery(c, qry + "limit ? offset abs(-(4 + ?))", 1, 5));
    }
    finally {
        c.destroy();
    }
}
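// The assertions above rely on a columnQuery(...) helper defined elsewhere in the test class
// hierarchy. Below is a minimal sketch of such a helper, assuming the public SqlFieldsQuery API
// (org.apache.ignite.cache.query.SqlFieldsQuery); the actual helper used by the test may differ.
private static List<?> columnQuery(IgniteCache<?, ?> c, String qry, Object... args) {
    List<Object> col = new ArrayList<>();

    // Execute the SQL fields query and keep only the first (single) selected column of each row.
    for (List<?> row : c.query(new SqlFieldsQuery(qry).setArgs(args)).getAll())
        col.add(row.get(0));

    return col;
}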
/**
 * @param cctx Cache context.
 * @param prj Projection (optional).
 * @param part Partition (optional).
 * @return Collection of data nodes in provided projection (if any).
 */
private static Collection<ClusterNode> nodes(final GridCacheContext<?, ?> cctx,
    @Nullable final ClusterGroup prj, @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null)
        return affNodes;

    final Set<ClusterNode> owners = part == null ?
        Collections.<ClusterNode>emptySet() : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(affNodes, new P1<ClusterNode>() {
        @Override public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name()) &&
                (prj == null || prj.node(n.id()) != null) &&
                (part == null || owners.contains(n));
        }
    });
}
/**
 * This constructor is meant for optimistic transactions.
 *
 * @param ctx Cache registry.
 * @param ldr Class loader.
 * @param nodeId Node ID.
 * @param nearNodeId Near node ID.
 * @param rmtThreadId Remote thread ID.
 * @param xidVer XID version.
 * @param commitVer Commit version.
 * @param sys System flag.
 * @param plc IO policy.
 * @param concurrency Concurrency level (should be optimistic).
 * @param isolation Transaction isolation.
 * @param invalidate Invalidate flag.
 * @param timeout Timeout.
 * @param writeEntries Write entries.
 * @param txSize Expected transaction size.
 * @param subjId Subject ID.
 * @param taskNameHash Task name hash code.
 * @throws IgniteCheckedException If unmarshalling failed.
 */
public GridNearTxRemote(
    GridCacheSharedContext ctx,
    ClassLoader ldr,
    UUID nodeId,
    UUID nearNodeId,
    long rmtThreadId,
    GridCacheVersion xidVer,
    GridCacheVersion commitVer,
    boolean sys,
    byte plc,
    TransactionConcurrency concurrency,
    TransactionIsolation isolation,
    boolean invalidate,
    long timeout,
    Collection<IgniteTxEntry> writeEntries,
    int txSize,
    @Nullable UUID subjId,
    int taskNameHash
) throws IgniteCheckedException {
    super(ctx, nodeId, rmtThreadId, xidVer, commitVer, sys, plc, concurrency, isolation, invalidate,
        timeout, txSize, subjId, taskNameHash);

    assert nearNodeId != null;

    this.nearNodeId = nearNodeId;

    readMap = Collections.emptyMap();

    writeMap = new LinkedHashMap<>(
        writeEntries != null ? Math.max(txSize, writeEntries.size()) : txSize, 1.0f);

    if (writeEntries != null) {
        for (IgniteTxEntry entry : writeEntries) {
            entry.unmarshal(ctx, true, ldr);

            addEntry(entry);
        }
    }
}
/**
 * @param cacheCtx Cache context.
 * @throws IgniteCheckedException If failed.
 */
private void initTopology(GridCacheContext cacheCtx) throws IgniteCheckedException {
    if (stopping(cacheCtx.cacheId()))
        return;

    if (canCalculateAffinity(cacheCtx)) {
        if (log.isDebugEnabled())
            log.debug("Will recalculate affinity [locNodeId=" + cctx.localNodeId() +
                ", exchId=" + exchId + ']');

        cacheCtx.affinity().calculateAffinity(exchId.topologyVersion(), discoEvt);
    }
    else {
        if (log.isDebugEnabled())
            log.debug("Will request affinity from remote node [locNodeId=" + cctx.localNodeId() +
                ", exchId=" + exchId + ']');

        // Fetch affinity assignment from remote node.
        GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(
            cacheCtx,
            exchId.topologyVersion(),
            CU.affinityNodes(cacheCtx, exchId.topologyVersion()));

        fetchFut.init();

        List<List<ClusterNode>> affAssignment = fetchFut.get();

        if (log.isDebugEnabled())
            log.debug("Fetched affinity from remote node, initializing affinity assignment " +
                "[locNodeId=" + cctx.localNodeId() + ", topVer=" + exchId.topologyVersion() + ']');

        if (affAssignment == null) {
            affAssignment = new ArrayList<>(cacheCtx.affinity().partitions());

            List<ClusterNode> empty = Collections.emptyList();

            for (int i = 0; i < cacheCtx.affinity().partitions(); i++)
                affAssignment.add(empty);
        }

        cacheCtx.affinity().initializeAffinity(exchId.topologyVersion(), affAssignment);
    }
}
/**
 * @param p Partition.
 * @param topVer Topology version ({@code -1} for all nodes).
 * @param state Partition state.
 * @param states Additional partition states.
 * @return List of nodes for the partition.
 */
private List<ClusterNode> nodes(int p, AffinityTopologyVersion topVer, GridDhtPartitionState state,
    GridDhtPartitionState... states) {
    Collection<UUID> allIds = topVer.topologyVersion() > 0 ? F.nodeIds(CU.affinityNodes(cctx, topVer)) : null;

    lock.readLock().lock();

    try {
        assert node2part != null && node2part.valid() : "Invalid node-to-partitions map [topVer=" + topVer +
            ", allIds=" + allIds + ", node2part=" + node2part + ", cache=" + cctx.name() + ']';

        Collection<UUID> nodeIds = part2node.get(p);

        // Node IDs can be null if both primary and backup nodes disappear.
        int size = nodeIds == null ? 0 : nodeIds.size();

        if (size == 0)
            return Collections.emptyList();

        List<ClusterNode> nodes = new ArrayList<>(size);

        for (UUID id : nodeIds) {
            if (topVer.topologyVersion() > 0 && !allIds.contains(id))
                continue;

            if (hasState(p, id, state, states)) {
                ClusterNode n = cctx.discovery().node(id);

                if (n != null && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion()))
                    nodes.add(n);
            }
        }

        return nodes;
    }
    finally {
        lock.readLock().unlock();
    }
}
/**
 * @param fld Folder with files to match.
 * @param ptrn Pattern to match against file name.
 * @return Collection of matched files.
 */
public static List<VisorLogFile> matchedFiles(File fld, final String ptrn) {
    List<VisorLogFile> files = fileTree(fld, MAX_FOLDER_DEPTH,
        new FileFilter() {
            @Override public boolean accept(File f) {
                return !f.isHidden() && (f.isDirectory() || f.isFile() && f.getName().matches(ptrn));
            }
        });

    Collections.sort(files, LAST_MODIFIED);

    return files;
}
/** {@inheritDoc} */
@Override public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
        if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
            if (m == null)
                m = new HashMap<>();

            m.put(cacheCtx.cacheId(),
                cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
        }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
        if (log.isDebugEnabled())
            log.debug("Completed partition exchange [localNode=" + cctx.localNodeId() +
                ", exchange=" + this + ']');

        initFut.onDone(err == null);

        GridTimeoutObject timeoutObj = this.timeoutObj;

        // Deschedule timeout object.
        if (timeoutObj != null)
            cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

        if (exchId.isLeft()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts())
                cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
        }

        return true;
    }

    return dummy;
}
/**
 * Finds all files in folder and in its sub-tree of specified depth.
 *
 * @param file Starting folder.
 * @param maxDepth Depth of the tree. If 1 - just look in the folder, no sub-folders.
 * @param filter File filter.
 * @return List of found files.
 */
public static List<VisorLogFile> fileTree(File file, int maxDepth, @Nullable FileFilter filter) {
    if (file.isDirectory()) {
        File[] files = (filter == null) ? file.listFiles() : file.listFiles(filter);

        if (files == null)
            return Collections.emptyList();

        List<VisorLogFile> res = new ArrayList<>(files.length);

        for (File f : files) {
            if (f.isFile() && f.length() > 0)
                res.add(new VisorLogFile(f));
            else if (maxDepth > 1)
                res.addAll(fileTree(f, maxDepth - 1, filter));
        }

        return res;
    }

    return F.asList(new VisorLogFile(file));
}
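// A hedged usage sketch (not part of the original source): collect log files with the two helpers
// above. The folder path, depth and file-name pattern are illustrative assumptions, and the calls
// presume the helpers are statically imported from their enclosing utility class.
File logDir = new File("/opt/ignite/work/log"); // Hypothetical location.

// All non-empty regular files up to three folder levels deep, without name filtering.
List<VisorLogFile> allFiles = fileTree(logDir, 3, null);

// Only files whose names match the given regular expression, sorted by last-modified time.
List<VisorLogFile> logFiles = matchedFiles(logDir, ".*\\.log");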
private void init() {
    ClusterNode node = nodes.poll();

    GridCacheQueryFutureAdapter<?, ?, R> fut0 = (GridCacheQueryFutureAdapter<?, ?, R>)(node.isLocal() ?
        qryMgr.queryLocal(bean) :
        qryMgr.queryDistributed(bean, Collections.singleton(node)));

    fut0.listen(new IgniteInClosure<IgniteInternalFuture<Collection<R>>>() {
        @Override public void apply(IgniteInternalFuture<Collection<R>> fut) {
            try {
                onDone(fut.get());
            }
            catch (IgniteCheckedException e) {
                // If there are no more candidate nodes, fail the future; otherwise retry on the next node.
                if (F.isEmpty(nodes))
                    onDone(e);
                else
                    init();
            }
        }
    });

    fut = fut0;
}
/** @return Remaining node IDs. */
Collection<UUID> remaining() {
    if (rmtIds == null)
        return Collections.emptyList();

    return F.lose(rmtIds, true, rcvdIds);
}
/** Java client configuration. */ public class GridClientConfiguration { /** Default client protocol. */ public static final GridClientProtocol DFLT_CLIENT_PROTOCOL = GridClientProtocol.TCP; /** Default topology refresh frequency is 2 sec. */ public static final int DFLT_TOP_REFRESH_FREQ = 2000; /** Default maximum time connection can be idle. */ public static final long DFLT_MAX_CONN_IDLE_TIME = 30000; /** Default ping interval in milliseconds. */ public static final long DFLT_PING_INTERVAL = 5000; /** Default ping timeout in milliseconds. */ public static final long DFLT_PING_TIMEOUT = 7000; /** Default connect timeout in milliseconds. */ public static final int DFLT_CONNECT_TIMEOUT = 10000; /** Default flag setting for TCP_NODELAY option. */ public static final boolean DFLT_TCP_NODELAY = true; /** List of servers to connect to. */ private Collection<String> srvs = Collections.emptySet(); /** List of routers to connect to. */ private Collection<String> routers = Collections.emptySet(); /** Client protocol. */ private GridClientProtocol proto = DFLT_CLIENT_PROTOCOL; /** Socket connect timeout. */ private int connectTimeout = DFLT_CONNECT_TIMEOUT; /** TCP_NODELAY flag. */ private boolean tcpNoDelay = DFLT_TCP_NODELAY; /** SSL context factory */ private GridSslContextFactory sslCtxFactory; /** Flag indicating whether metrics cache is enabled. */ private boolean enableMetricsCache = true; /** Flag indicating whether attributes cache is enabled. */ private boolean enableAttrsCache = true; /** Flag indicating whether metrics should be automatically fetched. */ private boolean autoFetchMetrics = true; /** Flag indicating whether attributes should be automatically fetched. */ private boolean autoFetchAttrs = true; /** Topology refresh frequency. */ private long topRefreshFreq = DFLT_TOP_REFRESH_FREQ; /** Max time of connection idleness. */ private long maxConnIdleTime = DFLT_MAX_CONN_IDLE_TIME; /** Ping interval. */ private long pingInterval = DFLT_PING_INTERVAL; /** Ping timeout. */ private long pingTimeout = DFLT_PING_TIMEOUT; /** Default balancer. */ private GridClientLoadBalancer balancer = new GridClientRandomBalancer(); /** Collection of data configurations. */ private Map<String, GridClientDataConfiguration> dataCfgs = Collections.emptyMap(); /** Credentials. */ private SecurityCredentialsProvider credProvider; /** Executor. */ private ExecutorService executor; /** Marshaller. */ private GridClientMarshaller marshaller = new GridClientOptimizedMarshaller(U.allPluginProviders()); /** Daemon flag. */ private boolean daemon; /** Creates default configuration. */ public GridClientConfiguration() { // No-op. } /** * Copy constructor. * * @param cfg Configuration to be copied. 
*/ public GridClientConfiguration(GridClientConfiguration cfg) { // Preserve alphabetical order for maintenance. autoFetchAttrs = cfg.isAutoFetchAttributes(); autoFetchMetrics = cfg.isAutoFetchMetrics(); balancer = cfg.getBalancer(); connectTimeout = cfg.getConnectTimeout(); credProvider = cfg.getSecurityCredentialsProvider(); enableAttrsCache = cfg.isEnableAttributesCache(); enableMetricsCache = cfg.isEnableMetricsCache(); executor = cfg.getExecutorService(); marshaller = cfg.getMarshaller(); maxConnIdleTime = cfg.getMaxConnectionIdleTime(); pingInterval = cfg.getPingInterval(); pingTimeout = cfg.getPingTimeout(); proto = cfg.getProtocol(); routers = cfg.getRouters(); srvs = cfg.getServers(); sslCtxFactory = cfg.getSslContextFactory(); tcpNoDelay = cfg.isTcpNoDelay(); topRefreshFreq = cfg.getTopologyRefreshFrequency(); daemon = cfg.isDaemon(); setDataConfigurations(cfg.getDataConfigurations()); } /** * Creates properties-based configuration based on passed-in properties. * * @param in Client configuration in properties format. * @throws GridClientException If parsing configuration failed. */ public GridClientConfiguration(Properties in) throws GridClientException { this("ignite.client", in); } /** * Creates properties-based configuration. * * @param prefix Prefix for the client properties. * @param in Properties map to load configuration from. * @throws GridClientException If parsing configuration failed. */ public GridClientConfiguration(String prefix, Properties in) throws GridClientException { load(prefix, in); } /** * Collection of {@code 'host:port'} pairs representing remote grid servers used to establish * initial connection to the grid. Once connection is established, Ignite will get a full view on * grid topology and will be able to connect to any available remote node. * * <p>Note that only these addresses are used to perform topology updates in background and to * detect Grid connectivity status. * * @return Collection of {@code 'host:port'} pairs representing remote grid servers. * @see GridClient#connected() */ public Collection<String> getServers() { return Collections.unmodifiableCollection(srvs); } /** * Collection of {@code 'host:port'} pairs representing grid routers used to establish connection * to the grid. * * <p>Addresses here could be owned by Routers as well as by individual Grid nodes. No additional * connections will be made even if other Grid nodes are available. * * <p>This configuration mode is designated for cases when some Grid nodes are unavailable (due to * security restrictions, for example), so only a few nodes acting as routers or dedicated router * components are used to access the entire Grid. * * <p>This configuration parameter will not be used and direct connections to all grid nodes will * be established if {@link #getServers()} returns a non-empty collection. * * <p>Note that only these addresses are used to perform topology updates in background and to * detect Grid connectivity status. * * @return Collection of {@code 'host:port'} pairs representing routers. * @see GridClient#connected() */ public Collection<String> getRouters() { return routers; } /** * Sets list of servers this client should connect to. * * @param srvs List of servers. */ public void setServers(Collection<String> srvs) { this.srvs = srvs != null ? srvs : Collections.<String>emptySet(); } /** * Sets list of routers this client should connect to. * * @param routers List of routers. 
*/ public void setRouters(Collection<String> routers) { this.routers = routers != null ? routers : Collections.<String>emptySet(); } /** * Gets protocol for communication between client and remote grid. Default is defined by {@link * #DFLT_CLIENT_PROTOCOL} constant. * * @return Protocol for communication between client and remote grid. */ public GridClientProtocol getProtocol() { return proto; } /** * Sets protocol type that should be used in communication. Protocol type cannot be changed after * client is created. * * @param proto Protocol type. * @see GridClientProtocol */ public void setProtocol(GridClientProtocol proto) { this.proto = proto; } /** * Gets timeout for socket connect operation in milliseconds. If {@code 0} - then wait infinitely. * Default is defined by {@link #DFLT_CONNECT_TIMEOUT} constant. * * @return Connect timeout in milliseconds. */ public int getConnectTimeout() { return connectTimeout; } /** * Gets flag indicating whether {@code TCP_NODELAY} flag should be enabled for outgoing * connections. This flag reduces communication latency and in the majority of cases should be set * to true. For more information, see {@link Socket#setTcpNoDelay(boolean)} * * <p>If not set, default value is {@link #DFLT_TCP_NODELAY} * * @return If {@code TCP_NODELAY} should be set on underlying sockets. */ public boolean isTcpNoDelay() { return tcpNoDelay; } /** * Sets whether {@code TCP_NODELAY} flag should be set on underlying socket connections. * * @param tcpNoDelay {@code True} if flag should be set. */ public void setTcpNoDelay(boolean tcpNoDelay) { this.tcpNoDelay = tcpNoDelay; } /** * Sets timeout for socket connect operation. * * @param connectTimeout Connect timeout in milliseconds. */ public void setConnectTimeout(int connectTimeout) { this.connectTimeout = connectTimeout; } /** * Gets a factory that should be used for SSL context creation. If it returns {@code null} then * SSL is considered disabled. * * @return Factory instance. * @see GridSslContextFactory */ public GridSslContextFactory getSslContextFactory() { return sslCtxFactory; } /** * Sets SSL context factory that will be used for creation of secure connections. * * @param sslCtxFactory Context factory. */ public void setSslContextFactory(GridSslContextFactory sslCtxFactory) { this.sslCtxFactory = sslCtxFactory; } /** * Default balancer to be used for computational client. It can be overridden for different * compute instances. By default {@link GridClientRandomBalancer} is used. * * @return Default balancer to be used for computational client. */ public GridClientLoadBalancer getBalancer() { return balancer; } /** * Sets default compute balancer. * * @param balancer Balancer to use. */ public void setBalancer(GridClientLoadBalancer balancer) { this.balancer = balancer; } /** * Gets client credentials provider to authenticate with. * * @return Credentials provider. */ public SecurityCredentialsProvider getSecurityCredentialsProvider() { return credProvider; } /** * Sets client credentials provider used in authentication process. * * @param credProvider Client credentials provider. */ public void setSecurityCredentialsProvider(SecurityCredentialsProvider credProvider) { this.credProvider = credProvider; } /** * Gets a collection of data configurations specified by user. * * @return Collection of data configurations (possibly empty). */ public Collection<GridClientDataConfiguration> getDataConfigurations() { return dataCfgs.values(); } /** * Sets data configurations. * * @param dataCfgs Data configurations. 
*/ public void setDataConfigurations(Collection<? extends GridClientDataConfiguration> dataCfgs) { this.dataCfgs = U.newHashMap(dataCfgs.size()); for (GridClientDataConfiguration dataCfg : dataCfgs) this.dataCfgs.put(dataCfg.getName(), new GridClientDataConfiguration(dataCfg)); } /** * Gets data configuration for a cache with specified name. * * @param name Name of grid cache. * @return Configuration or {@code null} if there is not configuration for specified name. */ public GridClientDataConfiguration getDataConfiguration(@Nullable String name) { return dataCfgs.get(name); } /** * Sets flag indicating whether node and cache metrics should be cached by client. * * @param enableMetricsCache {@code True} if cache should be enabled. */ public void setEnableMetricsCache(boolean enableMetricsCache) { this.enableMetricsCache = enableMetricsCache; } /** * Enables client to cache per-node and per-cache metrics internally. In memory sensitive * environments, such as mobile platforms, caching metrics may be expensive and, hence, this * parameter should be set to {@code false}. * * <p>Note that topology is refreshed automatically every {@link #getTopologyRefreshFrequency()} * interval, and if {@link #isAutoFetchMetrics()} enabled then metrics will be updated with that * frequency. * * <p>By default this value is {@code true} which means that metrics will be cached on the client * side. * * @return {@code True} if metrics cache is enabled, {@code false} otherwise. */ public boolean isEnableMetricsCache() { return enableMetricsCache; } /** * Sets flag indicating whether node attributes should be cached by client. * * @param enableAttrsCache {@code True} if cache should be enabled. */ public void setEnableAttributesCache(boolean enableAttrsCache) { this.enableAttrsCache = enableAttrsCache; } /** * Enables client to cache per-node attributes internally. In memory sensitive environments, such * as mobile platforms, caching node attributes may be expensive and, hence, this parameter should * be set to {@code false}. * * <p>Note that node attributes are static and, if cached, there is no need to refresh them again. * If {@link #isAutoFetchAttributes()} is enabled then attributes will be cached during client * initialization. * * <p>By default this value is {@code true} which means that node attributes will be cached on the * client side. * * @return {@code True} if attributes cache is enabled, {@code false} otherwise. */ public boolean isEnableAttributesCache() { return enableAttrsCache; } /** * Sets flag indicating whether node metrics should be fetched by client automatically. * * @param autoFetchMetrics {@code True} if metrics should be fetched. */ public void setAutoFetchMetrics(boolean autoFetchMetrics) { this.autoFetchMetrics = autoFetchMetrics; } /** * Allows client to fetch node metrics automatically with background topology refresh. * * <p>Note that this parameter will only affect auto-fetching of node metrics. Cache metrics still * need to be fetched explicitly via {@link GridClientData#metrics()} or {@link * GridClientData#metricsAsync()} methods. * * <p>By default this value is {@code true} which means that metrics will be fetched * automatically. * * @return {@code true} if client should fetch metrics on topology refresh, {@code false} * otherwise. */ public boolean isAutoFetchMetrics() { return autoFetchMetrics; } /** * Sets flag indicating whether node attributes should be fetched by client automatically. * * @param autoFetchAttrs {@code True} if attributes should be fetched. 
*/ public void setAutoFetchAttributes(boolean autoFetchAttrs) { this.autoFetchAttrs = autoFetchAttrs; } /** * Allows client to fetch node attributes automatically with background topology refresh. * * <p>By default this value is {@code true} which means that attributes will be fetched * automatically. * * @return {@code True} if client should fetch attributes once on topology refresh, {@code false} * otherwise. */ public boolean isAutoFetchAttributes() { return autoFetchAttrs; } /** * Gets topology refresh frequency. Default is defined by {@link #DFLT_TOP_REFRESH_FREQ} constant. * * @return Topology refresh frequency. */ public long getTopologyRefreshFrequency() { return topRefreshFreq; } /** * Sets topology refresh frequency. If topology cache is enabled, grid topology will be refreshed * every {@code topRefreshFreq} milliseconds. * * @param topRefreshFreq Topology refresh frequency in milliseconds. */ public void setTopologyRefreshFrequency(long topRefreshFreq) { this.topRefreshFreq = topRefreshFreq; } /** * Gets maximum amount of time that client connection can be idle before it is closed. Default is * defined by {@link #DFLT_MAX_CONN_IDLE_TIME} constant. * * @return Maximum idle time in milliseconds. */ public long getMaxConnectionIdleTime() { return maxConnIdleTime; } /** * Sets maximum time in milliseconds which connection can be idle before it is closed by client. * * @param maxConnIdleTime Maximum time of connection idleness in milliseconds. */ public void setMaxConnectionIdleTime(long maxConnIdleTime) { this.maxConnIdleTime = maxConnIdleTime; } /** * Gets time interval in milliseconds between ping requests. Default is defined by {@link * #DFLT_PING_INTERVAL} constant. * * <p>Ping requests used by {@link GridClientProtocol#TCP} protocol to detect network failures and * half-opened sockets. * * @return Ping interval. */ public long getPingInterval() { return pingInterval; } /** * Sets ping interval in milliseconds. * * @param pingInterval Ping interval in milliseconds. */ public void setPingInterval(long pingInterval) { this.pingInterval = pingInterval; } /** * Gets ping timeout. Default is defined by {@link #DFLT_PING_TIMEOUT} constant. * * <p>Ping requests used by {@link GridClientProtocol#TCP} protocol to detect network failures and * half-opened sockets. If no response received in period equal to this timeout than connection * considered broken and closed. * * @return Ping timeout. */ public long getPingTimeout() { return pingTimeout; } /** * Sets ping timeout in milliseconds. * * @param pingTimeout Ping interval in milliseconds. */ public void setPingTimeout(long pingTimeout) { this.pingTimeout = pingTimeout; } /** * Gets {@link ExecutorService} where client could run asynchronous operations. * * <p>When using {@link GridClientProtocol#TCP} this executor should be able to serve at least * {@code Runtime.getRuntime().availableProcessors()} parallel tasks. * * <p>Note that this executor will be automatically shut down when client get closed. * * @return {@link ExecutorService} instance to use. */ public ExecutorService getExecutorService() { return executor; } /** * Sets executor service. * * @param executor Executor service to use in client. */ public void setExecutorService(ExecutorService executor) { this.executor = executor; } /** * Gets the marshaller, that is used to communicate between client and server. * * <p>Options, that can be used out-of-the-box: * * <ul> * <li>{@link GridClientOptimizedMarshaller} (default) - Ignite's optimized marshaller. 
* <li>{@code GridClientPortableMarshaller} - Marshaller that supports portable objects. * <li>{@link org.apache.ignite.internal.client.marshaller.jdk.GridClientJdkMarshaller} - JDK * marshaller (not recommended). * </ul> * * @return A marshaller to use. */ public GridClientMarshaller getMarshaller() { return marshaller; } /** * Sets the marshaller to use for communication. * * @param marshaller A marshaller to use. */ public void setMarshaller(GridClientMarshaller marshaller) { this.marshaller = marshaller; } /** * Load client configuration from the properties map. * * @param prefix Prefix for the client properties. * @param in Properties map to load configuration from. * @throws GridClientException If parsing configuration failed. */ public void load(String prefix, Properties in) throws GridClientException { while (prefix.endsWith(".")) prefix = prefix.substring(0, prefix.length() - 1); if (!prefix.isEmpty()) prefix += "."; String balancer = in.getProperty(prefix + "balancer"); String connectTimeout = in.getProperty(prefix + "connectTimeout"); String cred = in.getProperty(prefix + "credentials"); String autoFetchMetrics = in.getProperty(prefix + "autoFetchMetrics"); String autoFetchAttrs = in.getProperty(prefix + "autoFetchAttributes"); String maxConnIdleTime = in.getProperty(prefix + "idleTimeout"); String proto = in.getProperty(prefix + "protocol"); String srvrs = in.getProperty(prefix + "servers"); String tcpNoDelay = in.getProperty(prefix + "tcp.noDelay"); String topRefreshFreq = in.getProperty(prefix + "topology.refresh"); String sslEnabled = in.getProperty(prefix + "ssl.enabled"); String sslProto = in.getProperty(prefix + "ssl.protocol", "TLS"); String sslKeyAlg = in.getProperty(prefix + "ssl.key.algorithm", "SunX509"); String keyStorePath = in.getProperty(prefix + "ssl.keystore.location"); String keyStorePwd = in.getProperty(prefix + "ssl.keystore.password"); String keyStoreType = in.getProperty(prefix + "ssl.keystore.type"); String trustStorePath = in.getProperty(prefix + "ssl.truststore.location"); String trustStorePwd = in.getProperty(prefix + "ssl.truststore.password"); String trustStoreType = in.getProperty(prefix + "ssl.truststore.type"); String dataCfgs = in.getProperty(prefix + "data.configurations"); setBalancer(resolveBalancer(balancer)); if (!F.isEmpty(connectTimeout)) setConnectTimeout(Integer.parseInt(connectTimeout)); if (!F.isEmpty(cred)) { int idx = cred.indexOf(':'); if (idx >= 0 && idx < cred.length() - 1) { setSecurityCredentialsProvider( new SecurityCredentialsBasicProvider( new SecurityCredentials(cred.substring(0, idx), cred.substring(idx + 1)))); } else { setSecurityCredentialsProvider( new SecurityCredentialsBasicProvider(new SecurityCredentials(null, null, cred))); } } if (!F.isEmpty(autoFetchMetrics)) setAutoFetchMetrics(Boolean.parseBoolean(autoFetchMetrics)); if (!F.isEmpty(autoFetchAttrs)) setAutoFetchAttributes(Boolean.parseBoolean(autoFetchAttrs)); if (!F.isEmpty(maxConnIdleTime)) setMaxConnectionIdleTime(Integer.parseInt(maxConnIdleTime)); if (!F.isEmpty(proto)) setProtocol(GridClientProtocol.valueOf(proto)); if (!F.isEmpty(srvrs)) setServers(Arrays.asList(srvrs.replaceAll("\\s+", "").split(","))); if (!F.isEmpty(tcpNoDelay)) setTcpNoDelay(Boolean.parseBoolean(tcpNoDelay)); if (!F.isEmpty(topRefreshFreq)) setTopologyRefreshFrequency(Long.parseLong(topRefreshFreq)); // // SSL configuration section // if (!F.isEmpty(sslEnabled) && Boolean.parseBoolean(sslEnabled)) { GridSslBasicContextFactory factory = new GridSslBasicContextFactory(); 
factory.setProtocol(F.isEmpty(sslProto) ? "TLS" : sslProto); factory.setKeyAlgorithm(F.isEmpty(sslKeyAlg) ? "SunX509" : sslKeyAlg); if (F.isEmpty(keyStorePath)) throw new IllegalArgumentException("SSL key store location is not specified."); factory.setKeyStoreFilePath(keyStorePath); if (keyStorePwd != null) factory.setKeyStorePassword(keyStorePwd.toCharArray()); factory.setKeyStoreType(F.isEmpty(keyStoreType) ? "jks" : keyStoreType); if (F.isEmpty(trustStorePath)) factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager()); else { factory.setTrustStoreFilePath(trustStorePath); if (trustStorePwd != null) factory.setTrustStorePassword(trustStorePwd.toCharArray()); factory.setTrustStoreType(F.isEmpty(trustStoreType) ? "jks" : trustStoreType); } setSslContextFactory(factory); } // // Data configuration section // if (!F.isEmpty(dataCfgs)) { String[] names = dataCfgs.replaceAll("\\s+", "").split(","); Collection<GridClientDataConfiguration> list = new ArrayList<>(); for (String cfgName : names) { if (F.isEmpty(cfgName)) continue; String name = in.getProperty(prefix + "data." + cfgName + ".name"); String bal = in.getProperty(prefix + "data." + cfgName + ".balancer"); String aff = in.getProperty(prefix + "data." + cfgName + ".affinity"); GridClientDataConfiguration dataCfg = new GridClientDataConfiguration(); dataCfg.setName(F.isEmpty(name) ? null : name); dataCfg.setBalancer(resolveBalancer(bal)); dataCfg.setAffinity(resolveAffinity(aff)); list.add(dataCfg); } setDataConfigurations(list); } } /** * Resolve load balancer from string definition. * * @param balancer Load balancer string definition. * @return Resolved load balancer. * @throws GridClientException If loading failed. */ private static GridClientLoadBalancer resolveBalancer(String balancer) throws GridClientException { if (F.isEmpty(balancer) || "random".equals(balancer)) return new GridClientRandomBalancer(); if ("roundrobin".equals(balancer)) return new GridClientRoundRobinBalancer(); return newInstance(GridClientLoadBalancer.class, balancer); } /** * Resolve data affinity from string definition. * * @param affinity Data affinity string definition. * @return Resolved data affinity. * @throws GridClientException If loading failed. */ private static GridClientDataAffinity resolveAffinity(String affinity) throws GridClientException { if (F.isEmpty(affinity)) return null; if ("partitioned".equals(affinity)) return new GridClientPartitionAffinity(); return newInstance(GridClientDataAffinity.class, affinity); } /** * Constructs new instance of the specified class. * * @param exp Expected class for the new instance. * @param clsName Class name to create new instance for. * @param <T> Expected class type for the new instance. * @return New instance of specified class. * @throws GridClientException If loading failed. */ private static <T> T newInstance(Class<T> exp, String clsName) throws GridClientException { Object obj; try { obj = Class.forName(clsName).newInstance(); } // Catch all for convenience. catch (Exception e) { throw new GridClientException("Failed to create class instance: " + clsName, e); } return exp.cast(obj); } /** * Set the daemon flag value. Communication threads will be created as daemons if this flag is * set. * * @param daemon Daemon flag. */ public void setDaemon(boolean daemon) { this.daemon = daemon; } /** * Get the daemon flag. * * @return Daemon flag. */ public boolean isDaemon() { return daemon; } }
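// A hedged usage sketch (not part of the original source): build a client configuration from a
// properties map using the default "ignite.client" prefix handled by load(String, Properties).
// The host:port values below are illustrative assumptions.
public static GridClientConfiguration exampleConfiguration() throws GridClientException {
    Properties props = new Properties();

    props.setProperty("ignite.client.servers", "127.0.0.1:11211,127.0.0.1:11212");
    props.setProperty("ignite.client.tcp.noDelay", "true");
    props.setProperty("ignite.client.topology.refresh", "2000");

    // The constructor delegates to load(...) with the default "ignite.client" prefix,
    // which is equivalent to calling the corresponding setters programmatically.
    return new GridClientConfiguration(props);
}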
/**
 * Grabs local events and detects if events were lost since the last poll.
 *
 * @param ignite Target grid.
 * @param evtOrderKey Unique key to take last order key from node local map.
 * @param evtThrottleCntrKey Unique key to take throttle count from node local map.
 * @param evtTypes Event types to collect.
 * @param evtMapper Closure to map grid events to Visor data transfer objects.
 * @return Collection of node events.
 */
public static Collection<VisorGridEvent> collectEvents(Ignite ignite, String evtOrderKey, String evtThrottleCntrKey,
    final int[] evtTypes, IgniteClosure<Event, VisorGridEvent> evtMapper) {
    assert ignite != null;
    assert evtTypes != null && evtTypes.length > 0;

    ConcurrentMap<String, Long> nl = ignite.cluster().nodeLocalMap();

    final long lastOrder = getOrElse(nl, evtOrderKey, -1L);
    final long throttle = getOrElse(nl, evtThrottleCntrKey, 0L);

    // When we arrive at a node for the first time to get its local events, we grab only events
    // that are not older than the given period, to make sure we are not accidentally grabbing
    // gigabytes of data.
    final long notOlderThan = System.currentTimeMillis() - EVENTS_COLLECT_TIME_WINDOW;

    // Flag for detecting gaps between events.
    final AtomicBoolean lastFound = new AtomicBoolean(lastOrder < 0);

    IgnitePredicate<Event> p = new IgnitePredicate<Event>() {
        /** */
        private static final long serialVersionUID = 0L;

        @Override public boolean apply(Event e) {
            // Detects that events were lost.
            if (!lastFound.get() && (lastOrder == e.localOrder()))
                lastFound.set(true);

            // Retains events by lastOrder, period and type.
            return e.localOrder() > lastOrder && e.timestamp() > notOlderThan && F.contains(evtTypes, e.type());
        }
    };

    Collection<Event> evts = ignite.events().localQuery(p);

    // Update latest order in node local, if not empty.
    if (!evts.isEmpty()) {
        Event maxEvt = Collections.max(evts, EVTS_ORDER_COMPARATOR);

        nl.put(evtOrderKey, maxEvt.localOrder());
    }

    // Update throttle counter.
    if (!lastFound.get())
        nl.put(evtThrottleCntrKey, throttle == 0 ? EVENTS_LOST_THROTTLE : throttle - 1);

    boolean lost = !lastFound.get() && throttle == 0;

    Collection<VisorGridEvent> res = new ArrayList<>(evts.size() + (lost ? 1 : 0));

    if (lost)
        res.add(new VisorGridEventsLost(ignite.cluster().localNode().id()));

    for (Event e : evts) {
        VisorGridEvent visorEvt = evtMapper.apply(e);

        if (visorEvt != null)
            res.add(visorEvt);
    }

    return res;
}
/**
 * Updates value for single partition.
 *
 * @param p Partition.
 * @param nodeId Node ID.
 * @param state State.
 * @param updateSeq Update sequence.
 */
@SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // In case a node joins, get topology at the time of the joining node.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
        long seq = node2part.updateSequence();

        if (seq != updateSeq) {
            if (seq > updateSeq) {
                if (this.updateSeq.get() < seq) {
                    // Update global counter if necessary.
                    boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

                    assert b : "Invalid update sequence [updateSeq=" + updateSeq + ", seq=" + seq +
                        ", curUpdateSeq=" + this.updateSeq.get() + ", node2part=" + node2part.toFullString() + ']';

                    updateSeq = seq + 1;
                }
                else
                    updateSeq = seq;
            }

            node2part.updateSequence(updateSeq);
        }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
        node2part.put(nodeId, map = new GridDhtPartitionMap(nodeId, updateSeq,
            Collections.<Integer, GridDhtPartitionState>emptyMap(), false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null)
        part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
}
/** * Starts activity. * * @throws IgniteInterruptedCheckedException If interrupted. */ public void init() throws IgniteInterruptedCheckedException { if (isDone()) return; if (init.compareAndSet(false, true)) { if (isDone()) return; try { // Wait for event to occur to make sure that discovery // will return corresponding nodes. U.await(evtLatch); assert discoEvt != null : this; assert !dummy && !forcePreload : this; ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion()); oldestNode.set(oldest); startCaches(); // True if client node joined or failed. boolean clientNodeEvt; if (F.isEmpty(reqs)) { int type = discoEvt.type(); assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED : discoEvt; clientNodeEvt = CU.clientNode(discoEvt.eventNode()); } else { assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt; boolean clientOnlyStart = true; for (DynamicCacheChangeRequest req : reqs) { if (!req.clientStartOnly()) { clientOnlyStart = false; break; } } clientNodeEvt = clientOnlyStart; } if (clientNodeEvt) { ClusterNode node = discoEvt.eventNode(); // Client need to initialize affinity for local join event or for stated client caches. if (!node.isLocal()) { for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) { initTopology(cacheCtx); top.beforeExchange(this); } else cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion()); } if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); onDone(exchId.topologyVersion()); skipPreload = cctx.kernalContext().clientNode(); return; } } if (cctx.kernalContext().clientNode()) { skipPreload = true; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; GridDhtPartitionTopology top = cacheCtx.topology(); top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId())); } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; initTopology(cacheCtx); } if (oldestNode.get() != null) { rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); sendPartitions(); } else onDone(exchId.topologyVersion()); return; } assert oldestNode.get() != null; for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) { if (cacheCtx .discovery() .cacheAffinityNodes(cacheCtx.name(), topologyVersion()) .isEmpty()) U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex()); } cacheCtx.preloader().onExchangeFutureAdded(); } List<String> cachesWithoutNodes = null; if (exchId.isLeft()) { for (String name : cctx.cache().cacheNames()) { if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) { if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>(); cachesWithoutNodes.add(name); // Fire event even if there is no client cache started. 
if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) { Event evt = new CacheEvent( name, cctx.localNode(), cctx.localNode(), "All server nodes have left the cluster.", EventType.EVT_CACHE_NODES_LEFT, 0, false, null, null, null, null, false, null, false, null, null, null); cctx.gridEvents().record(evt); } } } } if (cachesWithoutNodes != null) { StringBuilder sb = new StringBuilder( "All server nodes for the following caches have left the cluster: "); for (int i = 0; i < cachesWithoutNodes.size(); i++) { String cache = cachesWithoutNodes.get(i); sb.append('\'').append(cache).append('\''); if (i != cachesWithoutNodes.size() - 1) sb.append(", "); } U.quietAndWarn(log, sb.toString()); U.quietAndWarn(log, "Must have server nodes for caches to operate."); } assert discoEvt != null; assert exchId.nodeId().equals(discoEvt.eventNode().id()); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(cacheCtx.cacheId()); long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence(); // Update before waiting for locks. if (!cacheCtx.isLocal()) cacheCtx .topology() .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId())); } // Grab all alive remote nodes with order of equal or less than last joined node. rmtNodes = new ConcurrentLinkedQueue<>( CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion())); rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes))); for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet()) // If received any messages, process them. onReceive(m.getKey(), m.getValue()); AffinityTopologyVersion topVer = exchId.topologyVersion(); for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Must initialize topology after we get discovery event. initTopology(cacheCtx); cacheCtx.preloader().updateLastExchangeFuture(this); } IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer); // Assign to class variable so it will be included into toString() method. this.partReleaseFut = partReleaseFut; if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this); while (true) { try { partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { // Print pending transactions and locks that might have led to hang. dumpPendingObjects(); } } if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this); if (!F.isEmpty(reqs)) blockGateways(); if (exchId.isLeft()) cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion()); IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion()); while (true) { try { locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS); break; } catch (IgniteFutureTimeoutCheckedException ignored) { U.warn( log, "Failed to wait for locks release future. 
" + "Dumping pending objects that might be the cause: " + cctx.localNodeId()); U.warn(log, "Locked entries:"); Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks = cctx.mvcc().unfinishedLocks(exchId.topologyVersion()); for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet()) U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']'); } } for (GridCacheContext cacheCtx : cctx.cacheContexts()) { if (cacheCtx.isLocal()) continue; // Notify replication manager. GridCacheContext drCacheCtx = cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx; if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft()); // Partition release future is done so we can flush the write-behind store. cacheCtx.store().forceFlush(); // Process queued undeploys prior to sending/spreading map. cacheCtx.preloader().unwindUndeploys(); GridDhtPartitionTopology top = cacheCtx.topology(); assert topVer.equals(top.topologyVersion()) : "Topology version is updated only in this class instances inside single ExchangeWorker thread."; top.beforeExchange(this); } for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) { top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId())); top.beforeExchange(this); } } catch (IgniteInterruptedCheckedException e) { onDone(e); throw e; } catch (Throwable e) { U.error( log, "Failed to reinitialize local partitions (preloading will be stopped): " + exchId, e); onDone(e); if (e instanceof Error) throw (Error) e; return; } if (F.isEmpty(rmtIds)) { onDone(exchId.topologyVersion()); return; } ready.set(true); initFut.onDone(true); if (log.isDebugEnabled()) log.debug("Initialized future: " + this); // If this node is not oldest. if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions(); else { boolean allReceived = allReceived(); if (allReceived && replied.compareAndSet(false, true)) { if (spreadPartitions()) onDone(exchId.topologyVersion()); } } scheduleRecheck(); } else assert false : "Skipped init future: " + this; }
/**
 * Checks if any of the local partitions need to be evicted.
 *
 * @param updateSeq Update sequence.
 * @return {@code True} if the local partition map was changed (some partitions were scheduled for eviction).
 */
private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
        GridDhtPartitionState state = part.state();

        if (state.active()) {
            int p = part.id();

            List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

            if (!affNodes.contains(cctx.localNode())) {
                Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

                // If all affinity nodes are owners, then evict partition from local node.
                if (nodeIds.containsAll(F.nodeIds(affNodes))) {
                    part.rent(false);

                    updateLocal(part.id(), locId, part.state(), updateSeq);

                    changed = true;

                    if (log.isDebugEnabled())
                        log.debug("Evicted local partition (all affinity nodes are owners): " + part);
                }
                else {
                    int ownerCnt = nodeIds.size();
                    int affCnt = affNodes.size();

                    if (ownerCnt > affCnt) {
                        List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

                        // Sort by node orders in ascending order.
                        Collections.sort(sorted, CU.nodeComparator(true));

                        int diff = sorted.size() - affCnt;

                        for (int i = 0; i < diff; i++) {
                            ClusterNode n = sorted.get(i);

                            if (locId.equals(n.id())) {
                                part.rent(false);

                                updateLocal(part.id(), locId, part.state(), updateSeq);

                                changed = true;

                                if (log.isDebugEnabled())
                                    log.debug("Evicted local partition (this node is oldest non-affinity node): " +
                                        part);

                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return changed;
}