/** {@inheritDoc} */
@Override public Serializable reduce(List<GridComputeJobResult> results) throws GridException {
    if (log.isInfoEnabled())
        log.info("Reducing job [job=" + this + ", results=" + results + ']');

    if (results.size() > 1)
        fail();

    return results.get(0).getData();
}
/**
 * Increases priority if job has been bumped down.
 *
 * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
 *      for execution.
 * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
 */
private void bumpPriority(Collection<GridCollisionJobContext> waitJobs,
    List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
        GridCollisionJobContext ctx = passiveJobs.get(i);

        // If the job moved down relative to its original wait-queue position,
        // raise its priority by the configured starvation increment.
        if (i > indexOf(waitJobs, ctx))
            ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
}
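// bumpPriority() relies on an indexOf() helper that is not part of this excerpt. The sketch
// below is only an assumption of how such a helper might work (a linear scan over the ordered
// wait collection), not the actual implementation.
/**
 * Finds position of the given collision context in the ordered wait collection.
 *
 * @param jobs Ordered collection of collision contexts.
 * @param ctx Context to look up.
 * @return Zero-based index of the context, or {@code -1} if not found.
 */
private int indexOf(Collection<GridCollisionJobContext> jobs, GridCollisionJobContext ctx) {
    int i = 0;

    for (GridCollisionJobContext c : jobs) {
        if (c == ctx)
            return i;

        i++;
    }

    return -1;
}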
/** {@inheritDoc} */
@Override public void onCollision(Collection<GridCollisionJobContext> waitJobs,
    Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
        if (waitJobs.size() <= activateCnt) {
            // Fewer waiting jobs than free slots - activate them all.
            for (GridCollisionJobContext waitJob : waitJobs) {
                waitJob.activate();

                waitSize--;
            }
        }
        else {
            List<GridCollisionJobContext> passiveList = new ArrayList<GridCollisionJobContext>(waitJobs);

            // Put jobs with highest priority first.
            Collections.sort(passiveList, new Comparator<GridCollisionJobContext>() {
                /** {@inheritDoc} */
                @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                    int p1 = getJobPriority(o1);
                    int p2 = getJobPriority(o2);

                    return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
                }
            });

            if (preventStarvation)
                bumpPriority(waitJobs, passiveList);

            for (int i = 0; i < activateCnt; i++) {
                passiveList.get(i).activate();

                waitSize--;
            }
        }
    }

    if (waitSize > waitJobsNum) {
        List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

        // Put jobs with highest priority first.
        Collections.sort(waitList, new Comparator<GridCollisionJobContext>() {
            /** {@inheritDoc} */
            @Override public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                int p1 = getJobPriority(o1);
                int p2 = getJobPriority(o2);

                return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
            }
        });

        int skip = waitJobs.size() - waitSize;

        int i = 0;

        // Cancel lowest-priority jobs that exceed the wait queue limit.
        for (GridCollisionJobContext waitCtx : waitList) {
            if (++i >= skip) {
                waitCtx.cancel();

                if (--waitSize <= waitJobsNum)
                    break;
            }
        }
    }
}
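// Both onCollision() and bumpPriority() call a getJobPriority() helper that is not shown in
// this excerpt. The sketch below is only an assumption of how such a helper could resolve the
// priority: read the per-job attribute under jobAttrKey and fall back to a default when the
// attribute is absent or of an unexpected type (the dfltPriority field is hypothetical).
private int getJobPriority(GridCollisionJobContext ctx) {
    assert ctx != null;

    Integer p = null;

    try {
        p = (Integer)ctx.getJobContext().getAttribute(jobAttrKey);
    }
    catch (ClassCastException ignored) {
        // Attribute of unexpected type - fall back to default priority below.
    }

    return p != null ? p : dfltPriority;
}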
/**
 * Performs flush.
 *
 * @throws GridException If failed.
 */
private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
        if (!f.isDone()) {
            if (activeFuts0 == null)
                activeFuts0 = new ArrayList<>((int)(activeFuts.size() * 1.2));

            activeFuts0.add(f);
        }
        else {
            f.get();

            doneCnt++;
        }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty())
        return;

    while (true) {
        Queue<GridFuture<?>> q = null;

        for (Buffer buf : bufMappings.values()) {
            GridFuture<?> flushFut = buf.flush();

            if (flushFut != null) {
                if (q == null)
                    q = new ArrayDeque<>(bufMappings.size() * 2);

                q.add(flushFut);
            }
        }

        if (q != null) {
            assert !q.isEmpty();

            boolean err = false;

            for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
                try {
                    fut.get();
                }
                catch (GridException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to flush buffer: " + e);

                    err = true;
                }
            }

            if (err)
                // Remaps needed - flush buffers.
                continue;
        }

        doneCnt = 0;

        for (int i = 0; i < activeFuts0.size(); i++) {
            GridFuture f = activeFuts0.get(i);

            if (f == null)
                doneCnt++;
            else if (f.isDone()) {
                f.get();

                doneCnt++;

                activeFuts0.set(i, null);
            }
            else
                break;
        }

        if (doneCnt == activeFuts0.size())
            return;
    }
}
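// doFlush() stamps lastFlushTime, which suggests a time-based flush driver elsewhere in the
// class. The sketch below is a hypothetical helper (the tryFlush() name and autoFlushFreq
// field are assumptions, not part of this excerpt) showing how such a driver could skip the
// flush while the configured interval has not yet elapsed.
private void tryFlush() throws GridException {
    long freq = autoFlushFreq;

    // Flush only if automatic flushing is enabled and the interval has passed.
    if (freq > 0 && U.currentTimeMillis() - lastFlushTime >= freq)
        doFlush();
}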
/** {@inheritDoc} */
@Override public String reduce(List<GridComputeJobResult> results) throws GridException {
    assert results.size() == 1;

    return results.get(0).getData();
}
/**
 * This method is called to map or split grid task into multiple grid jobs. This is the first
 * method that gets called when task execution starts.
 *
 * @param subgrid Nodes available for this task execution. Note that order of nodes is
 *      guaranteed to be randomized by container. This ensures that every time you simply
 *      iterate through grid nodes, the order of nodes will be random, which over time should
 *      result in all nodes being used equally.
 * @param data Task execution argument. Can be {@code null}. This is the same argument as the
 *      one passed into {@code Grid#execute(...)} methods.
 * @return Map of grid jobs assigned to subgrid nodes. Unless
 *      {@link GridComputeTaskContinuousMapper} is injected into the task, an exception will be
 *      thrown if {@code null} or an empty map is returned.
 * @throws GridException If mapping could not complete successfully. This exception will be
 *      thrown out of {@link GridComputeTaskFuture#get()} method.
 */
@Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid,
    @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to wanted node. Otherwise, take the first one.
    GridNode targetNode = F.find(subgrid, subgrid.get(0), new GridPredicate<GridNode>() {
        @Override public boolean apply(GridNode e) {
            return preferredNode.equals(e.id());
        }
    });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
            /** Injected logger. */
            @GridLoggerResource
            private GridLogger log;

            /** Injected grid instance. */
            @GridInstanceResource
            private Grid grid;

            @Override public Object execute() throws GridException {
                log.info("Going to put data: " + data.size());

                GridCache<Object, Object> cache = grid.cache(cacheName);

                assert cache != null;

                Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

                for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
                    T2<Integer, Collection<Integer>> pair = entry.getValue();

                    Object affKey = pair.get1();

                    // Group lock partition.
                    try (GridCacheTx tx = cache.txStartPartition(cache.affinity().partition(affKey),
                        optimistic ? OPTIMISTIC : PESSIMISTIC, REPEATABLE_READ, 0, pair.get2().size())) {
                        for (Integer val : pair.get2())
                            cache.put(val, val);

                        tx.commit();
                    }
                }

                log.info("Finished put data: " + data.size());

                return data;
            }

            /**
             * Groups values by partitions.
             *
             * @param data Data to put.
             * @return Grouped map.
             */
            private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
                GridCache<Object, Object> cache = grid.cache(cacheName);

                Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

                for (Integer val : data) {
                    int part = cache.affinity().partition(val);

                    T2<Integer, Collection<Integer>> tup = res.get(part);

                    if (tup == null) {
                        tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                        res.put(part, tup);
                    }

                    tup.get2().add(val);
                }

                return res;
            }
        },
        targetNode);
}
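// For context, a task with a map() method like the one above would typically be submitted
// through the compute API and its reduce() result collected from the task future. The snippet
// below is a usage sketch only: the PutDataTask class name and the sample input collection are
// hypothetical, and the exact shape of the execute(...) call may differ between versions.
Collection<Integer> data = new ArrayList<>();

for (int i = 0; i < 1000; i++)
    data.add(i);

// Submit the task with the data collection as the task argument and wait for the result.
GridComputeTaskFuture<Object> fut = grid.compute().execute(new PutDataTask(), data);

Object res = fut.get();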