/** {@inheritDoc} */
@Override public final Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, T arg) {
    assert subgrid != null;
    assert !subgrid.isEmpty();

    Collection<? extends ComputeJob> jobs = split(subgrid.size(), arg);

    if (F.isEmpty(jobs))
        throw new IgniteException("Split returned no jobs.");

    Map<ComputeJob, ClusterNode> map = U.newHashMap(jobs.size());

    for (ComputeJob job : jobs) {
        ClusterNode old = map.put(job, balancer.getBalancedNode(job, null));

        if (old != null)
            throw new IgniteException("Failed to map task (same job instance is being mapped more than once) " +
                "[job=" + job + ", task=" + this + ']');
    }

    return map;
}
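The map() implementation above pairs the jobs produced by split() with nodes chosen by the load balancer, pre-sizing the result map to the number of jobs. A minimal sketch of a task that would exercise it is shown below; the class names (WordLengthTask, WordLengthJob logic) are hypothetical, and only the public ComputeTaskSplitAdapter / ComputeJobAdapter / ComputeJobResult API already referenced by the snippet is assumed.

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.ignite.compute.ComputeJob;
import org.apache.ignite.compute.ComputeJobAdapter;
import org.apache.ignite.compute.ComputeJobResult;
import org.apache.ignite.compute.ComputeTaskSplitAdapter;

// Hypothetical sketch: a task whose split() feeds the map() implementation above.
public class WordLengthTask extends ComputeTaskSplitAdapter<String, Integer> {
    /** Produces one job per word; map() above assigns each job to a balanced node. */
    @Override protected Collection<? extends ComputeJob> split(int gridSize, String arg) {
        Collection<ComputeJob> jobs = new ArrayList<>();

        for (final String word : arg.split(" ")) {
            jobs.add(new ComputeJobAdapter() {
                @Override public Object execute() {
                    return word.length();
                }
            });
        }

        return jobs;
    }

    /** Sums the per-job results. */
    @Override public Integer reduce(List<ComputeJobResult> results) {
        int sum = 0;

        for (ComputeJobResult res : results)
            sum += res.<Integer>getData();

        return sum;
    }
}

A caller would typically run such a task via ignite.compute().execute(new WordLengthTask(), "some input string").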
/** {@inheritDoc} */
@Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    if (writes != null)
        unmarshalTx(writes, false, ctx, ldr);

    if (reads != null)
        unmarshalTx(reads, false, ctx, ldr);

    if (grpLockKeyBytes != null && grpLockKey == null)
        grpLockKey = ctx.marshaller().unmarshal(grpLockKeyBytes, ldr);

    if (dhtVerKeys != null && dhtVers == null) {
        assert dhtVerVals != null;
        assert dhtVerKeys.size() == dhtVerVals.size();

        Iterator<IgniteTxKey> keyIt = dhtVerKeys.iterator();
        Iterator<GridCacheVersion> verIt = dhtVerVals.iterator();

        dhtVers = U.newHashMap(dhtVerKeys.size());

        while (keyIt.hasNext()) {
            IgniteTxKey key = keyIt.next();

            key.finishUnmarshal(ctx.cacheContext(key.cacheId()), ldr);

            dhtVers.put(key, verIt.next());
        }
    }

    if (txNodesBytes != null)
        txNodes = ctx.marshaller().unmarshal(txNodesBytes, ldr);
}
/**
 * Data streamer should correctly load entries from a HashMap in case of grids with more than one
 * node and with GridOptimizedMarshaller that requires serializable objects.
 *
 * @throws Exception If failed.
 */
public void testAddDataFromMap() throws Exception {
    cnt = 0;

    startGrids(2);

    Ignite g0 = grid(0);

    IgniteDataStreamer<Integer, String> dataLdr = g0.dataStreamer(null);

    Map<Integer, String> map = U.newHashMap(KEYS_COUNT);

    for (int i = 0; i < KEYS_COUNT; i++)
        map.put(i, String.valueOf(i));

    dataLdr.addData(map);

    dataLdr.close();

    Random rnd = new Random();

    IgniteCache<Integer, String> c = g0.cache(null);

    for (int i = 0; i < KEYS_COUNT; i++) {
        Integer k = rnd.nextInt(KEYS_COUNT);

        String v = c.get(k);

        assertEquals(k.toString(), v);
    }
}
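Outside of the test harness, the same streamer API is usually opened against a named cache and closed with try-with-resources, since IgniteDataStreamer is AutoCloseable. A minimal usage sketch follows; the ignite instance and the cache name "myCache" are illustrative assumptions, not taken from the test above.

// Hypothetical usage sketch of IgniteDataStreamer with a pre-sized batch map.
try (IgniteDataStreamer<Integer, String> streamer = ignite.dataStreamer("myCache")) {
    Map<Integer, String> batch = U.newHashMap(1_000);

    for (int i = 0; i < 1_000; i++)
        batch.put(i, String.valueOf(i));

    // addData() is asynchronous; closing the streamer flushes any remaining buffers.
    streamer.addData(batch);
}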
/**
 * Set source nodes.
 *
 * @param nodes Nodes.
 */
public void setSources(Collection<ClusterNode> nodes) {
    assert remainingRows == null;

    remainingRows = U.newHashMap(nodes.size());

    for (ClusterNode node : nodes) {
        if (remainingRows.put(node.id(), new Counter()) != null)
            throw new IllegalStateException("Duplicate node id: " + node.id());
    }
}
/**
 * Parses HTTP parameters in an appropriate format and returns a map of values for a predefined
 * list of names.
 *
 * @param req Request.
 * @return Map of parsed parameters.
 */
@SuppressWarnings({"unchecked"})
private Map<String, Object> parameters(ServletRequest req) {
    Map<String, String[]> params = req.getParameterMap();

    if (F.isEmpty(params))
        return Collections.emptyMap();

    Map<String, Object> map = U.newHashMap(params.size());

    for (Map.Entry<String, String[]> entry : params.entrySet())
        map.put(entry.getKey(), parameter(entry.getValue()));

    return map;
}
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override public <M extends Map<?, ?>> M readMap(MessageCollectionItemType keyType, MessageCollectionItemType valType,
    boolean linked, MessageReader reader) {
    if (readSize == -1) {
        int size = readInt();

        if (!lastFinished)
            return null;

        readSize = size;
    }

    if (readSize >= 0) {
        if (map == null)
            map = linked ? U.newLinkedHashMap(readSize) : U.newHashMap(readSize);

        for (int i = readItems; i < readSize; i++) {
            if (!keyDone) {
                Object key = read(keyType, reader);

                if (!lastFinished)
                    return null;

                mapCur = key;
                keyDone = true;
            }

            Object val = read(valType, reader);

            if (!lastFinished)
                return null;

            map.put(mapCur, val);

            keyDone = false;

            readItems++;
        }
    }

    readSize = -1;
    readItems = 0;
    mapCur = null;

    M map0 = (M)map;

    map = null;

    return map0;
}
/** {@inheritDoc} */
@Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    if (ownedValKeys != null && ownedVals == null) {
        ownedVals = U.newHashMap(ownedValKeys.size());

        assert ownedValKeys.size() == ownedValVals.size();

        Iterator<IgniteTxKey> keyIter = ownedValKeys.iterator();
        Iterator<CacheVersionedValue> valIter = ownedValVals.iterator();

        while (keyIter.hasNext()) {
            IgniteTxKey key = keyIter.next();

            GridCacheContext cctx = ctx.cacheContext(key.cacheId());

            CacheVersionedValue val = valIter.next();

            key.finishUnmarshal(cctx, ldr);
            val.finishUnmarshal(cctx, ldr);

            ownedVals.put(key, val);
        }
    }

    if (retVal != null && retVal.cacheId() != 0) {
        GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());

        assert cctx != null : retVal.cacheId();

        retVal.finishUnmarshal(cctx, ldr);
    }

    if (filterFailedKeys != null) {
        for (IgniteTxKey key : filterFailedKeys) {
            GridCacheContext cctx = ctx.cacheContext(key.cacheId());

            key.finishUnmarshal(cctx, ldr);
        }
    }
}
/** {@inheritDoc} */
@Override public Map<Integer, BinaryType> metadata(Collection<Integer> typeIds) throws BinaryObjectException {
    try {
        Collection<PortableMetadataKey> keys = new ArrayList<>(typeIds.size());

        for (Integer typeId : typeIds)
            keys.add(new PortableMetadataKey(typeId));

        Map<PortableMetadataKey, BinaryMetadata> meta = metaDataCache.getAll(keys);

        Map<Integer, BinaryType> res = U.newHashMap(meta.size());

        for (Map.Entry<PortableMetadataKey, BinaryMetadata> e : meta.entrySet())
            res.put(e.getKey().typeId(), e.getValue().wrap(portableCtx));

        return res;
    }
    catch (CacheException e) {
        throw new BinaryObjectException(e);
    }
}
/**
 * @param cctx Cache context.
 * @param completionCb Callback to invoke when future is completed.
 * @param writeVer Write version.
 * @param updateReq Update request.
 * @param updateRes Update response.
 */
public GridDhtAtomicUpdateFuture(
    GridCacheContext cctx,
    CI2<GridNearAtomicUpdateRequest, GridNearAtomicUpdateResponse> completionCb,
    GridCacheVersion writeVer,
    GridNearAtomicUpdateRequest updateReq,
    GridNearAtomicUpdateResponse updateRes
) {
    this.cctx = cctx;
    this.writeVer = writeVer;

    futVer = cctx.versions().next(updateReq.topologyVersion());

    this.updateReq = updateReq;
    this.completionCb = completionCb;
    this.updateRes = updateRes;

    if (log == null)
        log = U.logger(cctx.kernalContext(), logRef, GridDhtAtomicUpdateFuture.class);

    keys = new ArrayList<>(updateReq.keys().size());
    mappings = U.newHashMap(updateReq.keys().size());

    boolean topLocked = updateReq.topologyLocked() || (updateReq.fastMap() && !updateReq.clientRequest());

    waitForExchange = !topLocked;
}
/**
 * Creates REST request.
 *
 * @param cmd Command.
 * @param params Parameters.
 * @param req Servlet request.
 * @return REST request.
 * @throws IgniteCheckedException If creation failed.
 */
@Nullable private GridRestRequest createRequest(GridRestCommand cmd, Map<String, Object> params,
    HttpServletRequest req) throws IgniteCheckedException {
    GridRestRequest restReq;

    switch (cmd) {
        case GET_OR_CREATE_CACHE:
        case DESTROY_CACHE: {
            GridRestCacheRequest restReq0 = new GridRestCacheRequest();

            restReq0.cacheName((String)params.get("cacheName"));

            restReq = restReq0;

            break;
        }

        case ATOMIC_DECREMENT:
        case ATOMIC_INCREMENT: {
            DataStructuresRequest restReq0 = new DataStructuresRequest();

            restReq0.key(params.get("key"));
            restReq0.initial(longValue("init", params, null));
            restReq0.delta(longValue("delta", params, null));

            restReq = restReq0;

            break;
        }

        case CACHE_CONTAINS_KEY:
        case CACHE_CONTAINS_KEYS:
        case CACHE_GET:
        case CACHE_GET_ALL:
        case CACHE_GET_AND_PUT:
        case CACHE_GET_AND_REPLACE:
        case CACHE_PUT_IF_ABSENT:
        case CACHE_GET_AND_PUT_IF_ABSENT:
        case CACHE_PUT:
        case CACHE_PUT_ALL:
        case CACHE_REMOVE:
        case CACHE_REMOVE_VALUE:
        case CACHE_REPLACE_VALUE:
        case CACHE_GET_AND_REMOVE:
        case CACHE_REMOVE_ALL:
        case CACHE_ADD:
        case CACHE_CAS:
        case CACHE_METRICS:
        case CACHE_SIZE:
        case CACHE_METADATA:
        case CACHE_REPLACE:
        case CACHE_APPEND:
        case CACHE_PREPEND: {
            GridRestCacheRequest restReq0 = new GridRestCacheRequest();

            String cacheName = (String)params.get("cacheName");

            restReq0.cacheName(F.isEmpty(cacheName) ? null : cacheName);
            restReq0.key(params.get("key"));
            restReq0.value(params.get("val"));
            restReq0.value2(params.get("val2"));

            Object val1 = params.get("val1");

            if (val1 != null)
                restReq0.value(val1);

            restReq0.cacheFlags(intValue("cacheFlags", params, 0));
            restReq0.ttl(longValue("exp", params, null));

            if (cmd == CACHE_GET_ALL || cmd == CACHE_PUT_ALL || cmd == CACHE_REMOVE_ALL || cmd == CACHE_CONTAINS_KEYS) {
                List<Object> keys = values("k", params);
                List<Object> vals = values("v", params);

                if (keys.size() < vals.size())
                    throw new IgniteCheckedException("Number of keys must be greater than or equal to the number of values.");

                Map<Object, Object> map = U.newHashMap(keys.size());

                Iterator<Object> keyIt = keys.iterator();
                Iterator<Object> valIt = vals.iterator();

                while (keyIt.hasNext())
                    map.put(keyIt.next(), valIt.hasNext() ? valIt.next() : null);

                restReq0.values(map);
            }

            restReq = restReq0;

            break;
        }

        case TOPOLOGY:
        case NODE: {
            GridRestTopologyRequest restReq0 = new GridRestTopologyRequest();

            restReq0.includeMetrics(Boolean.parseBoolean((String)params.get("mtr")));
            restReq0.includeAttributes(Boolean.parseBoolean((String)params.get("attr")));
            restReq0.nodeIp((String)params.get("ip"));
            restReq0.nodeId(uuidValue("id", params));

            restReq = restReq0;

            break;
        }

        case EXE:
        case RESULT:
        case NOOP: {
            GridRestTaskRequest restReq0 = new GridRestTaskRequest();

            restReq0.taskId((String)params.get("id"));
            restReq0.taskName((String)params.get("name"));
            restReq0.params(values("p", params));
            restReq0.async(Boolean.parseBoolean((String)params.get("async")));
            restReq0.timeout(longValue("timeout", params, 0L));

            restReq = restReq0;

            break;
        }

        case LOG: {
            GridRestLogRequest restReq0 = new GridRestLogRequest();

            restReq0.path((String)params.get("path"));
            restReq0.from(intValue("from", params, -1));
            restReq0.to(intValue("to", params, -1));

            restReq = restReq0;

            break;
        }

        case NAME:
        case VERSION: {
            restReq = new GridRestRequest();

            break;
        }

        case EXECUTE_SQL_QUERY:
        case EXECUTE_SQL_FIELDS_QUERY: {
            RestQueryRequest restReq0 = new RestQueryRequest();

            restReq0.sqlQuery((String)params.get("qry"));
            restReq0.arguments(values("arg", params).toArray());
            restReq0.typeName((String)params.get("type"));

            String pageSize = (String)params.get("pageSize");

            if (pageSize != null)
                restReq0.pageSize(Integer.parseInt(pageSize));

            restReq0.cacheName((String)params.get("cacheName"));

            if (cmd == EXECUTE_SQL_QUERY)
                restReq0.queryType(RestQueryRequest.QueryType.SQL);
            else
                restReq0.queryType(RestQueryRequest.QueryType.SQL_FIELDS);

            restReq = restReq0;

            break;
        }

        case EXECUTE_SCAN_QUERY: {
            RestQueryRequest restReq0 = new RestQueryRequest();

            restReq0.sqlQuery((String)params.get("qry"));

            String pageSize = (String)params.get("pageSize");

            if (pageSize != null)
                restReq0.pageSize(Integer.parseInt(pageSize));

            restReq0.cacheName((String)params.get("cacheName"));
            restReq0.className((String)params.get("className"));
            restReq0.queryType(RestQueryRequest.QueryType.SCAN);

            restReq = restReq0;

            break;
        }

        case FETCH_SQL_QUERY: {
            RestQueryRequest restReq0 = new RestQueryRequest();

            String qryId = (String)params.get("qryId");

            if (qryId != null)
                restReq0.queryId(Long.parseLong(qryId));

            String pageSize = (String)params.get("pageSize");

            if (pageSize != null)
                restReq0.pageSize(Integer.parseInt(pageSize));

            restReq0.cacheName((String)params.get("cacheName"));

            restReq = restReq0;

            break;
        }

        case CLOSE_SQL_QUERY: {
            RestQueryRequest restReq0 = new RestQueryRequest();

            String qryId = (String)params.get("qryId");

            if (qryId != null)
                restReq0.queryId(Long.parseLong(qryId));

            restReq0.cacheName((String)params.get("cacheName"));

            restReq = restReq0;

            break;
        }

        default:
            throw new IgniteCheckedException("Invalid command: " + cmd);
    }

    restReq.address(new InetSocketAddress(req.getRemoteAddr(), req.getRemotePort()));

    restReq.command(cmd);

    if (params.containsKey("ignite.login") || params.containsKey("ignite.password")) {
        SecurityCredentials cred = new SecurityCredentials(
            (String)params.get("ignite.login"), (String)params.get("ignite.password"));

        restReq.credentials(cred);
    }

    String clientId = (String)params.get("clientId");

    try {
        if (clientId != null)
            restReq.clientId(UUID.fromString(clientId));
    }
    catch (Exception ignored) {
        // Ignore invalid client id. Rest handler will process this logic.
    }

    String destId = (String)params.get("destId");

    try {
        if (destId != null)
            restReq.destinationId(UUID.fromString(destId));
    }
    catch (IllegalArgumentException ignored) {
        // Don't fail - try to execute locally.
    }

    String sesTokStr = (String)params.get("sessionToken");

    try {
        if (sesTokStr != null)
            restReq.sessionToken(U.hexString2ByteArray(sesTokStr));
    }
    catch (IllegalArgumentException ignored) {
        // Ignore invalid session token.
    }

    return restReq;
}
/**
 * @param keys Keys.
 * @param mapped Mappings to check for duplicates.
 * @param topVer Topology version on which keys should be mapped.
 */
private void map(
    Collection<KeyCacheObject> keys,
    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mapped,
    AffinityTopologyVersion topVer
) {
    Collection<ClusterNode> cacheNodes = CU.affinityNodes(cctx, topVer);

    if (cacheNodes.isEmpty()) {
        onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
            "(all partition nodes left the grid) [topVer=" + topVer + ", cache=" + cctx.name() + ']'));

        return;
    }

    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings = U.newHashMap(cacheNodes.size());

    final int keysSize = keys.size();

    Map<K, V> locVals = U.newHashMap(keysSize);

    boolean hasRmtNodes = false;

    // Assign keys to primary nodes.
    for (KeyCacheObject key : keys)
        hasRmtNodes |= map(key, mappings, locVals, topVer, mapped);

    if (isDone())
        return;

    if (!locVals.isEmpty())
        add(new GridFinishedFuture<>(locVals));

    if (hasRmtNodes) {
        if (!trackable) {
            trackable = true;

            cctx.mvcc().addFuture(this, futId);
        }
    }

    // Create mini futures.
    for (Map.Entry<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> entry : mappings.entrySet()) {
        final ClusterNode n = entry.getKey();

        final LinkedHashMap<KeyCacheObject, Boolean> mappedKeys = entry.getValue();

        assert !mappedKeys.isEmpty();

        // If this is the primary or backup node for the keys.
        if (n.isLocal()) {
            final GridDhtFuture<Collection<GridCacheEntryInfo>> fut = cache().getDhtAsync(n.id(),
                -1,
                mappedKeys,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc,
                skipVals);

            final Collection<Integer> invalidParts = fut.invalidPartitions();

            if (!F.isEmpty(invalidParts)) {
                Collection<KeyCacheObject> remapKeys = new ArrayList<>(keysSize);

                for (KeyCacheObject key : keys) {
                    if (key != null && invalidParts.contains(cctx.affinity().partition(key)))
                        remapKeys.add(key);
                }

                AffinityTopologyVersion updTopVer = cctx.discovery().topologyVersionEx();

                assert updTopVer.compareTo(topVer) > 0 :
                    "Got invalid partitions for local node but topology version did not change " +
                    "[topVer=" + topVer + ", updTopVer=" + updTopVer + ", invalidParts=" + invalidParts + ']';

                // Remap recursively.
                map(remapKeys, mappings, updTopVer);
            }

            // Add new future.
            add(fut.chain(new C1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>, Map<K, V>>() {
                @Override public Map<K, V> apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> fut) {
                    try {
                        return createResultMap(fut.get());
                    }
                    catch (Exception e) {
                        U.error(log, "Failed to get values from dht cache [fut=" + fut + "]", e);

                        onDone(e);

                        return Collections.emptyMap();
                    }
                }
            }));
        }
        else {
            MiniFuture fut = new MiniFuture(n, mappedKeys, topVer);

            GridCacheMessage req = new GridNearGetRequest(
                cctx.cacheId(),
                futId,
                fut.futureId(),
                n.version().compareTo(SINGLE_GET_MSG_SINCE) >= 0 ? null : DUMMY_VER,
                mappedKeys,
                readThrough,
                topVer,
                subjId,
                taskName == null ? 0 : taskName.hashCode(),
                expiryPlc != null ? expiryPlc.forAccess() : -1L,
                skipVals,
                cctx.deploymentEnabled());

            add(fut); // Append new future.

            try {
                cctx.io().send(n, req, cctx.ioPolicy());
            }
            catch (IgniteCheckedException e) {
                // Fail the whole thing.
                if (e instanceof ClusterTopologyCheckedException)
                    fut.onNodeLeft((ClusterTopologyCheckedException)e);
                else
                    fut.onResult(e);
            }
        }
    }
}
/**
 * Sets data configurations.
 *
 * @param dataCfgs Data configurations.
 */
public void setDataConfigurations(Collection<? extends GridClientDataConfiguration> dataCfgs) {
    this.dataCfgs = U.newHashMap(dataCfgs.size());

    for (GridClientDataConfiguration dataCfg : dataCfgs)
        this.dataCfgs.put(dataCfg.getName(), new GridClientDataConfiguration(dataCfg));
}
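Every snippet above sizes its map up front with U.newHashMap(int) (or U.newLinkedHashMap(int)) instead of calling new HashMap<>(), so the map never has to rehash while the known number of entries is inserted. A minimal sketch of such a helper is shown below; the capacity heuristic (expected size plus headroom for the default 0.75 load factor) is an assumption used for illustration, not necessarily the exact IgniteUtils implementation.

import java.util.HashMap;
import java.util.LinkedHashMap;

// Sketch of a pre-sizing helper in the spirit of U.newHashMap(int).
public final class MapUtils {
    /** Creates a HashMap large enough to hold expSize entries without resizing. */
    public static <K, V> HashMap<K, V> newHashMap(int expSize) {
        return new HashMap<>(capacity(expSize));
    }

    /** Same pre-sizing, but preserving insertion order. */
    public static <K, V> LinkedHashMap<K, V> newLinkedHashMap(int expSize) {
        return new LinkedHashMap<>(capacity(expSize));
    }

    /** Assumed heuristic: expected size plus roughly one third headroom. */
    private static int capacity(int expSize) {
        if (expSize < 3)
            return expSize + 1;

        if (expSize < (1 << 30))
            return expSize + expSize / 3;

        return Integer.MAX_VALUE; // HashMap clamps oversized capacities internally.
    }
}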