/**
 * Method cleans up all events that either outnumber the queue size limit or exceed the
 * time-to-live value. It does nothing if someone else is cleaning up the queue (lock is locked)
 * or if there are queue readers (readersNum > 0).
 */
private void cleanupQueue() {
    long now = U.currentTimeMillis();

    long queueOversize = evts.sizex() - expireCnt;

    for (int i = 0; i < queueOversize && evts.sizex() > expireCnt; i++) {
        GridEvent expired = evts.poll();

        if (log.isDebugEnabled())
            log.debug("Event expired by count: " + expired);
    }

    while (true) {
        ConcurrentLinkedDeque8.Node<GridEvent> node = evts.peekx();

        if (node == null) // Queue is empty.
            break;

        GridEvent evt = node.item();

        if (evt == null) // Competing with another thread.
            continue;

        if (now - evt.timestamp() < expireAgeMs)
            break;

        if (evts.unlinkx(node) && log.isDebugEnabled())
            log.debug("Event expired by age: " + node.item());
    }
}
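// A minimal standalone sketch of the same two-phase eviction policy shown above:
// first trim by count, then by age. Event, MAX_COUNT and MAX_AGE_MS are illustrative
// stand-ins, not GridGain API; the real SPI uses a lock-free ConcurrentLinkedDeque8
// with node unlinking instead of a plain Deque.
import java.util.Deque;

class EvictionSketch {
    /** Illustrative event holding only its creation timestamp. */
    static final class Event {
        final long ts;

        Event(long ts) { this.ts = ts; }
    }

    static final int MAX_COUNT = 10_000;
    static final long MAX_AGE_MS = 60_000;

    static void cleanup(Deque<Event> evts) {
        long now = System.currentTimeMillis();

        // Phase 1: evict the oldest events while the queue exceeds the size limit.
        while (evts.size() > MAX_COUNT)
            evts.poll();

        // Phase 2: evict events older than the TTL, stopping at the first fresh
        // event (the queue is ordered by insertion time).
        Event head;

        while ((head = evts.peek()) != null && now - head.ts >= MAX_AGE_MS)
            evts.poll();
    }
}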
/** {@inheritDoc} */
@Override public void put(@Nullable GridCacheTx tx, K key, @Nullable V val) throws GridException {
    init();

    if (log.isDebugEnabled())
        log.debug("Store put [key=" + key + ", val=" + val + ", tx=" + tx + ']');

    if (val == null) {
        remove(tx, key);

        return;
    }

    Session ses = session(tx);

    try {
        GridCacheHibernateBlobStoreEntry entry =
            new GridCacheHibernateBlobStoreEntry(toBytes(key), toBytes(val));

        ses.saveOrUpdate(entry);
    }
    catch (HibernateException e) {
        rollback(ses, tx);

        throw new GridException("Failed to put value to cache store [key=" + key + ", val=" + val + "]", e);
    }
    finally {
        end(ses, tx);
    }
}
/**
 * Creates a new HTTP request handler.
 *
 * @param hnd Handler.
 * @param authChecker Authentication checking closure.
 * @param log Logger.
 */
GridJettyRestHandler(GridRestProtocolHandler hnd, GridClosure<String, Boolean> authChecker, GridLogger log) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;

    // Init default page and favicon.
    try {
        initDefaultPage();

        if (log.isDebugEnabled())
            log.debug("Initialized default page.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
        initFavicon();

        if (log.isDebugEnabled())
            log.debug(favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
}
/** {@inheritDoc} */
@Override public void spiStop() throws GridSpiException {
    unregisterMBean();

    // Ack ok stop.
    if (log.isDebugEnabled())
        log.debug(stopInfo());
}
/** @param log Logger. */
private void execute(GridLogger log) {
    try {
        log.info("Started execute.");

        // Count down the shared job latch so that the main thread knows that all jobs are
        // inside the "execute" routine.
        jobLatch.countDown();

        log.info("After job latch.");

        // Wait for the main thread to allow jobs to proceed.
        latch.await();

        log.info("After latch.");

        if (awaitMasterLeaveCallback) {
            latch0.await();

            log.info("After latch0.");
        }
        else
            log.info("Latch 0 skipped.");
    }
    catch (InterruptedException e) {
        // We do not expect any interruptions here, hence this statement.
        fail("Unexpected exception: " + e);
    }
}
/** {@inheritDoc} */
@SuppressWarnings({"CatchGenericClass"})
@Override public final void run() {
    try {
        body();
    }
    catch (InterruptedException e) {
        if (log.isDebugEnabled())
            log.debug("Caught interrupted exception: " + e);
    }
    // Catch everything to make sure that it gets logged properly and
    // not to kill any threads from the underlying thread pool.
    catch (Throwable e) {
        U.error(log, "Runtime error caught during grid runnable execution: " + this, e);
    }
    finally {
        cleanup();

        if (log.isDebugEnabled()) {
            if (isInterrupted())
                log.debug("Grid runnable finished due to interruption without cancellation: " + getName());
            else
                log.debug("Grid runnable finished normally: " + getName());
        }
    }
}
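// A minimal standalone sketch of the same "never let an exception escape a pool
// thread" pattern used by run() above. SafeTask is an illustrative stand-in, not
// GridGain API: InterruptedException would be treated as a normal shutdown signal,
// while any other Throwable is logged and swallowed so that a worker thread started
// via execute() is not terminated by an escaping exception.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class SafeTask implements Runnable {
    private final Runnable body;

    SafeTask(Runnable body) {
        this.body = body;
    }

    @Override public void run() {
        try {
            body.run();
        }
        catch (Throwable e) {
            // Log and swallow: with execute(), an escaping Throwable would
            // otherwise terminate the underlying pool thread.
            System.err.println("Task failed: " + e);
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);

        pool.execute(new SafeTask(() -> { throw new IllegalStateException("boom"); }));
        pool.execute(new SafeTask(() -> System.out.println("still running")));

        pool.shutdown();
    }
}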
/**
 * Completeness callback.
 *
 * @param success {@code True} if lock was acquired.
 * @param distribute {@code True} if lock removal should be distributed in case of failure.
 * @return {@code True} if completed by this operation.
 */
private boolean onComplete(boolean success, boolean distribute) {
    if (log.isDebugEnabled())
        log.debug("Received onComplete(..) callback [success=" + success + ", distribute=" + distribute +
            ", fut=" + this + ']');

    if (!success)
        undoLocks(distribute);

    if (tx != null)
        cctx.tm().txContext(tx);

    if (super.onDone(success, err.get())) {
        if (log.isDebugEnabled())
            log.debug("Completing future: " + this);

        // Clean up.
        cctx.mvcc().removeFuture(this);

        if (timeoutObj != null)
            cctx.time().removeTimeoutObject(timeoutObj);

        return true;
    }

    return false;
}
/**
 * @param cancel {@code True} to close with cancellation.
 * @throws GridException If failed.
 */
@Override public void close(boolean cancel) throws GridException {
    if (!closed.compareAndSet(false, true))
        return;

    busyLock.block();

    if (log.isDebugEnabled())
        log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']');

    GridException e = null;

    try {
        // Assuming that no methods are called on this loader after this method is called.
        if (cancel) {
            cancelled = true;

            for (Buffer buf : bufMappings.values())
                buf.cancelAll();
        }
        else
            doFlush();

        ctx.event().removeLocalEventListener(discoLsnr);

        ctx.io().removeMessageListener(topic);
    }
    catch (GridException e0) {
        e = e0;
    }

    fut.onDone(null, e);

    if (e != null)
        throw e;
}
/** {@inheritDoc} */
@SuppressWarnings({"unchecked", "RedundantTypeArguments"})
@Override public V load(@Nullable GridCacheTx tx, K key) throws GridException {
    init();

    if (log.isDebugEnabled())
        log.debug("Store load [key=" + key + ", tx=" + tx + ']');

    Session ses = session(tx);

    try {
        GridCacheHibernateBlobStoreEntry entry = (GridCacheHibernateBlobStoreEntry)
            ses.get(GridCacheHibernateBlobStoreEntry.class, toBytes(key));

        if (entry == null)
            return null;

        return fromBytes(entry.getValue());
    }
    catch (HibernateException e) {
        rollback(ses, tx);

        throw new GridException("Failed to load value from cache store with key: " + key, e);
    }
    finally {
        end(ses, tx);
    }
}
/** {@inheritDoc} */
@Override public void txEnd(GridCacheTx tx, boolean commit) throws GridException {
    init();

    Session ses = tx.removeMeta(ATTR_SES);

    if (ses != null) {
        Transaction hTx = ses.getTransaction();

        if (hTx != null) {
            try {
                if (commit) {
                    ses.flush();

                    hTx.commit();
                }
                else
                    hTx.rollback();

                if (log.isDebugEnabled())
                    log.debug("Transaction ended [xid=" + tx.xid() + ", commit=" + commit + ']');
            }
            catch (HibernateException e) {
                throw new GridException("Failed to end transaction [xid=" + tx.xid() +
                    ", commit=" + commit + ']', e);
            }
            finally {
                ses.close();
            }
        }
    }
}
/**
 * Gets Hibernate session.
 *
 * @param tx Cache transaction.
 * @return Session.
 */
Session session(@Nullable GridCacheTx tx) {
    Session ses;

    if (tx != null) {
        ses = tx.meta(ATTR_SES);

        if (ses == null) {
            ses = sesFactory.openSession();

            ses.beginTransaction();

            // Store session in transaction metadata, so it can be accessed
            // for other operations on the same transaction.
            tx.addMeta(ATTR_SES, ses);

            if (log.isDebugEnabled())
                log.debug("Hibernate session open [ses=" + ses + ", tx=" + tx.xid() + "]");
        }
    }
    else {
        ses = sesFactory.openSession();

        ses.beginTransaction();
    }

    return ses;
}
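// The put()/load() methods above call end(ses, tx) and rollback(ses, tx), which are
// not shown in this excerpt. A plausible sketch, assuming the contract implied by
// session() and txEnd(): a session opened without a cache transaction is committed
// and closed per-operation, while a session bound to a transaction stays open until
// txEnd() runs. This is an illustration of the assumed contract, not the actual
// GridGain implementation.
private void end(Session ses, @Nullable GridCacheTx tx) {
    // Commit and close only if this session is not bound to a cache transaction;
    // otherwise txEnd() will commit or roll back and close it later.
    if (tx == null) {
        Transaction hTx = ses.getTransaction();

        if (hTx != null && hTx.isActive())
            hTx.commit();

        ses.close();
    }
}

private void rollback(Session ses, @Nullable GridCacheTx tx) {
    // Same rule: transactional sessions are rolled back in txEnd().
    if (tx == null) {
        Transaction hTx = ses.getTransaction();

        if (hTx != null && hTx.isActive())
            hTx.rollback();
    }
}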
/** {@inheritDoc} */
@Override protected Collection<GridComputeJobAdapter> split(int gridSize, Object arg) throws GridException {
    assert rsrc1 != null;
    assert rsrc2 != null;
    assert rsrc3 != null;
    assert rsrc4 != null;
    assert log != null;

    log.info("Injected shared resource1 into task: " + rsrc1);
    log.info("Injected shared resource2 into task: " + rsrc2);
    log.info("Injected shared resource3 into task: " + rsrc3);
    log.info("Injected shared resource4 into task: " + rsrc4);
    log.info("Injected log resource into task: " + log);

    task1Rsrc1 = rsrc1;
    task1Rsrc2 = rsrc2;
    task1Rsrc3 = rsrc3;
    task1Rsrc4 = rsrc4;

    Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize);

    for (int i = 0; i < gridSize; i++)
        jobs.add(new GridSharedJob1());

    return jobs;
}
/** {@inheritDoc} */
@Override public Serializable reduce(List<GridComputeJobResult> results) throws GridException {
    if (log.isInfoEnabled())
        log.info("Reducing job [job=" + this + ", results=" + results + ']');

    if (results.size() > 1)
        fail();

    return results.get(0).getData();
}
/** {@inheritDoc} */
@Nullable @Override public GridDeployment explicitDeploy(Class<?> cls, ClassLoader clsLdr) throws GridException {
    if (log.isDebugEnabled())
        log.debug("Ignoring explicit deploy [cls=" + cls + ", clsLdr=" + clsLdr + ']');

    return null;
}
/** {@inheritDoc} */
@Override public void spiStart(String gridName) throws GridSpiException {
    assertParameter(parallelJobsNum > 0, "parallelJobsNum > 0");
    assertParameter(waitJobsNum >= 0, "waitJobsNum >= 0");
    assertParameter(taskAttrKey != null, "taskAttrKey != null");
    assertParameter(jobAttrKey != null, "jobAttrKey != null");

    // Start SPI start stopwatch.
    startStopwatch();

    // Ack parameters.
    if (log.isDebugEnabled()) {
        log.debug(configInfo("parallelJobsNum", parallelJobsNum));
        log.debug(configInfo("taskAttrKey", taskAttrKey));
        log.debug(configInfo("jobAttrKey", jobAttrKey));
        log.debug(configInfo("dfltPriority", dfltPriority));
        log.debug(configInfo("starvationInc", starvationInc));
        log.debug(configInfo("preventStarvation", preventStarvation));
    }

    registerMBean(gridName, this, GridPriorityQueueCollisionSpiMBean.class);

    // Ack start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
/**
 * Gets job priority. First tries to get it from the job context. If the job context has no
 * priority, then tries the task session. If the task session has no priority either, the
 * default one is used.
 *
 * @param ctx Collision job context.
 * @return Job priority.
 */
private int getJobPriority(GridCollisionJobContext ctx) {
    assert ctx != null;

    Integer p = null;

    GridJobContext jctx = ctx.getJobContext();

    try {
        p = (Integer)jctx.getAttribute(jobAttrKey);
    }
    catch (ClassCastException e) {
        LT.error(log, e, "Type of job context priority attribute '" + jobAttrKey +
            "' is not java.lang.Integer [type=" + jctx.getAttribute(jobAttrKey).getClass() + ']');
    }

    if (p == null) {
        GridTaskSession ses = ctx.getTaskSession();

        try {
            p = (Integer)ses.getAttribute(taskAttrKey);
        }
        catch (ClassCastException e) {
            LT.error(log, e, "Type of task session priority attribute '" + taskAttrKey +
                "' is not java.lang.Integer [type=" + ses.getAttribute(taskAttrKey).getClass() + ']');
        }

        if (p == null) {
            if (log.isDebugEnabled()) {
                log.debug("Failed to get priority from job context attribute '" + jobAttrKey +
                    "' and task session attribute '" + taskAttrKey + "' (will use default priority): " +
                    dfltPriority);
            }

            p = dfltPriority;
        }
    }

    assert p != null;

    return p;
}
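// A hedged usage sketch for the lookup above. Inside a task, setting the session
// attribute that the SPI reads raises the priority of all of that task's jobs;
// "grid.task.priority" is an illustrative key name here -- the real key is whatever
// taskAttrKey was configured to on the SPI (a single job can likewise be overridden
// via the job context attribute under jobAttrKey). The setAttribute() call mirrors
// the one used in the session load test map() method below.
void raisePriority(GridTaskSession taskSes) throws GridException {
    // Must be an Integer: getJobPriority() above casts the attribute value and
    // logs an error if any other type is found.
    taskSes.setAttribute("grid.task.priority", 10);
}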
/** {@inheritDoc} */
@SuppressWarnings("BusyWait")
@Override public Boolean reduce(List<GridComputeJobResult> results) throws GridException {
    assert taskSes != null;
    assert results != null;
    assert params != null;
    assert !params.isEmpty();
    assert results.size() == params.size();

    Map<String, Integer> receivedParams = new HashMap<>();

    boolean allAttrReceived = false;

    int cnt = 0;

    while (!allAttrReceived && cnt++ < 3) {
        allAttrReceived = true;

        for (Map.Entry<String, Integer> entry : params.entrySet()) {
            assert taskSes.getAttribute(entry.getKey()) != null;

            Integer newVal = (Integer)taskSes.getAttribute(entry.getKey());

            assert newVal != null;

            receivedParams.put(entry.getKey(), newVal);

            if (newVal != entry.getValue() + 1)
                allAttrReceived = false;
        }

        if (!allAttrReceived) {
            try {
                Thread.sleep(100);
            }
            catch (InterruptedException e) {
                throw new GridException("Thread interrupted.", e);
            }
        }
    }

    if (log.isDebugEnabled()) {
        for (Map.Entry<String, Integer> entry : receivedParams.entrySet()) {
            log.debug("Received session attr value [name=" + entry.getKey() + ", val=" + entry.getValue() +
                ", expected=" + (params.get(entry.getKey()) + 1) + ']');
        }
    }

    return allAttrReceived;
}
/** {@inheritDoc} */
@Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException {
    if (log.isInfoEnabled())
        log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

    Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT);

    for (int i = 1; i <= SPLIT_COUNT; i++)
        jobs.add(new GridCancelTestJob(i));

    return jobs;
}
/** {@inheritDoc} */
@Override public Serializable execute() {
    int arg = this.<Integer>argument(0);

    try {
        if (log.isInfoEnabled())
            log.info("Executing job [job=" + this + ", arg=" + arg + ']');

        startSignal.countDown();

        try {
            if (!startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS))
                fail();

            if (arg == 1) {
                if (log.isInfoEnabled())
                    log.info("Job one is proceeding.");
            }
            else
                Thread.sleep(WAIT_TIME);
        }
        catch (InterruptedException e) {
            if (log.isInfoEnabled())
                log.info("Job got cancelled [arg=" + arg + ", ses=" + ses + ", e=" + e + ']');

            return 0;
        }

        if (log.isInfoEnabled())
            log.info("Completing job: " + ses);

        return argument(0);
    }
    finally {
        stopSignal.countDown();

        processedCnt++;
    }
}
/** {@inheritDoc} */
@Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid, String arg)
    throws GridException {
    assert locNodeId != null;

    if (log.isInfoEnabled())
        log.info("Mapping jobs [subgrid=" + subgrid + ", arg=" + arg + ']');

    GridNode remoteNode = null;

    for (GridNode node : subgrid) {
        if (!node.id().equals(locNodeId))
            remoteNode = node;
    }

    return Collections.singletonMap(
        new GridComputeJobAdapter(locNodeId) {
            /** */
            @GridLocalNodeIdResource
            private UUID nodeId;

            /** {@inheritDoc} */
            @SuppressWarnings("NakedNotify")
            @Override public Serializable execute() throws GridException {
                assert nodeId != null;

                if (!nodeId.equals(argument(0))) {
                    try {
                        synchronized (mux) {
                            mux.notifyAll();
                        }

                        Thread.sleep(Integer.MAX_VALUE);
                    }
                    catch (InterruptedException e) {
                        throw new GridComputeExecutionRejectedException(
                            "Expected interruption during execution.", e);
                    }
                }
                else
                    return "success";

                throw new GridComputeExecutionRejectedException("Expected exception during execution.");
            }
        },
        remoteNode);
}
/** {@inheritDoc} */
@Override public void record(GridEvent evt) throws GridSpiException {
    assert evt != null;

    // Filter out events.
    if (filter == null || filter.apply(evt)) {
        cleanupQueue();

        evts.add(evt);

        // Make sure to filter out metrics updates to prevent log from flooding.
        if (evt.type() != EVT_NODE_METRICS_UPDATED && log.isDebugEnabled())
            log.debug("Event recorded: " + evt);
    }
}
/**
 * @param rmtReducer Optional reducer.
 * @param rmtTransform Optional transformer.
 * @param args Arguments.
 * @return Future.
 */
@SuppressWarnings("IfMayBeConditional")
private <R> GridCacheQueryFuture<R> execute(@Nullable GridReducer<T, R> rmtReducer,
    @Nullable GridClosure<T, R> rmtTransform, @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(),
            new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.namexx()));

    if (log.isDebugEnabled())
        log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
        try {
            cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
            cctx.deploy().registerClasses(args);
        }
        catch (GridException e) {
            return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
        }
    }

    if (subjId == null)
        subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean = new GridCacheQueryBean(this, (GridReducer<Object, Object>)rmtReducer,
        (GridClosure<Object, Object>)rmtTransform, args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
        return (GridCacheQueryFuture<R>)(loc ? qryMgr.queryFieldsLocal(bean) :
            qryMgr.queryFieldsDistributed(bean, nodes));
    else
        return (GridCacheQueryFuture<R>)(loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
}
/** @throws GridException If operation failed. */
private void initializeLatch() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
        try {
            internalLatch = CU.outTx(
                new Callable<CountDownLatch>() {
                    @Override public CountDownLatch call() throws Exception {
                        GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

                        try {
                            GridCacheCountDownLatchValue val = latchView.get(key);

                            if (val == null) {
                                if (log.isDebugEnabled())
                                    log.debug("Failed to find count down latch with given name: " + name);

                                assert cnt == 0;

                                return new CountDownLatch(cnt);
                            }

                            tx.commit();

                            return new CountDownLatch(val.get());
                        }
                        finally {
                            tx.end();
                        }
                    }
                },
                ctx);

            if (log.isDebugEnabled())
                log.debug("Initialized internal latch: " + internalLatch);
        }
        finally {
            initLatch.countDown();
        }
    }
    else {
        try {
            initLatch.await();
        }
        catch (InterruptedException ignored) {
            throw new GridException("Thread has been interrupted.");
        }

        if (internalLatch == null)
            throw new GridException("Internal latch has not been properly initialized.");
    }
}
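// A minimal standalone sketch of the initialization pattern used above: exactly one
// thread wins the CAS on the init guard and performs the (possibly slow) setup, while
// every other caller parks on the init latch until setup finishes. OnceInitializer and
// its factory parameter are illustrative stand-ins, not GridGain API.
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

class OnceInitializer<T> {
    private final AtomicBoolean initGuard = new AtomicBoolean();
    private final CountDownLatch initLatch = new CountDownLatch(1);

    private volatile T resource;

    T get(Callable<T> factory) throws Exception {
        if (initGuard.compareAndSet(false, true)) {
            try {
                // Only the CAS winner runs the factory.
                resource = factory.call();
            }
            finally {
                // Release waiters even if initialization failed, so they can
                // observe the null resource and report the error.
                initLatch.countDown();
            }
        }
        else
            initLatch.await();

        if (resource == null)
            throw new IllegalStateException("Resource has not been properly initialized.");

        return resource;
    }
}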
/**
 * @param log Logger.
 * @param job Actual job.
 */
private void onMasterLeave(GridLogger log, Object job) {
    log.info("Callback executed: " + job);

    latch0.countDown();

    invokeLatch.countDown();
}
/** {@inheritDoc} */
@Override public void handle(String target, Request req, HttpServletRequest srvReq, HttpServletResponse res)
    throws IOException, ServletException {
    if (log.isDebugEnabled())
        log.debug("Handling request [target=" + target + ", req=" + req + ", srvReq=" + srvReq + ']');

    if (target.startsWith("/gridgain")) {
        processRequest(target, srvReq, res);

        req.setHandled(true);
    }
    else if (target.startsWith("/favicon.ico")) {
        if (favicon == null) {
            res.setStatus(HttpServletResponse.SC_NOT_FOUND);

            req.setHandled(true);

            return;
        }

        res.setStatus(HttpServletResponse.SC_OK);

        res.setContentType("image/x-icon");

        res.getOutputStream().write(favicon);
        res.getOutputStream().flush();

        req.setHandled(true);
    }
    else {
        if (dfltPage == null) {
            res.setStatus(HttpServletResponse.SC_NOT_FOUND);

            req.setHandled(true);

            return;
        }

        res.setStatus(HttpServletResponse.SC_OK);

        res.setContentType("text/html");

        res.getWriter().write(dfltPage);
        res.getWriter().flush();

        req.setHandled(true);
    }
}
/**
 * @param nodeId Sender.
 * @param res Result.
 */
void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received lock response from node [nodeId=" + nodeId + ", res=" + res +
                ", fut=" + this + ']');

        for (GridFuture<Boolean> fut : pending()) {
            if (isMini(fut)) {
                MiniFuture mini = (MiniFuture)fut;

                if (mini.futureId().equals(res.miniId())) {
                    assert mini.node().id().equals(nodeId);

                    if (log.isDebugEnabled())
                        log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

                    mini.onResult(res);

                    if (log.isDebugEnabled())
                        log.debug("Future after processed lock response [fut=" + this + ", mini=" + mini +
                            ", res=" + res + ']');

                    return;
                }
            }
        }

        U.warn(log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res +
            ", fut=" + this + ']');
    }
    else if (log.isDebugEnabled())
        log.debug("Ignoring lock response from node (future is done) [nodeId=" + nodeId + ", res=" + res +
            ", fut=" + this + ']');
}
/**
 * @param cached Entry to check.
 * @return {@code True} if filter passed.
 */
private boolean filter(GridCacheEntryEx<K, V> cached) {
    try {
        if (!cctx.isAll(cached, filter)) {
            if (log.isDebugEnabled())
                log.debug("Filter didn't pass for entry (will fail lock): " + cached);

            onFailed(true);

            return false;
        }

        return true;
    }
    catch (GridException e) {
        onError(e);

        return false;
    }
}
/** {@inheritDoc} */
@Override public void spiStart(String gridName) throws GridSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    assertParameter(expireCnt > 0, "expireCnt > 0");
    assertParameter(expireAgeMs > 0, "expireAgeMs > 0");

    // Ack parameters.
    if (log.isDebugEnabled()) {
        log.debug(configInfo("expireAgeMs", expireAgeMs));
        log.debug(configInfo("expireCnt", expireCnt));
    }

    registerMBean(gridName, this, GridMemoryEventStorageSpiMBean.class);

    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
/** {@inheritDoc} */
@Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid, Integer arg)
    throws GridException {
    assert taskSes != null;
    assert arg != null;
    assert arg > 0;

    Map<GridSessionLoadTestJob, GridNode> map = new HashMap<>(subgrid.size());

    Iterator<GridNode> iter = subgrid.iterator();

    Random rnd = new Random();

    params = new HashMap<>(arg);

    Collection<UUID> assigned = new ArrayList<>(subgrid.size());

    for (int i = 0; i < arg; i++) {
        // Recycle iterator.
        if (!iter.hasNext())
            iter = subgrid.iterator();

        String paramName = UUID.randomUUID().toString();

        int paramVal = rnd.nextInt();

        taskSes.setAttribute(paramName, paramVal);

        GridNode node = iter.next();

        assigned.add(node.id());

        map.put(new GridSessionLoadTestJob(paramName), node);

        params.put(paramName, paramVal);

        if (log.isDebugEnabled())
            log.debug("Set session attribute [name=" + paramName + ", value=" + paramVal + ']');
    }

    taskSes.setAttribute("nodes", assigned);

    return map;
}
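// A minimal standalone sketch of the "recycle iterator" round-robin used in map()
// above: when there are more jobs than nodes, the iterator is simply restarted so
// assignment wraps around the node list. The String/Integer types are illustrative
// stand-ins for jobs and nodes.
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class RoundRobinSketch {
    static Map<String, Integer> assign(List<String> jobs, List<Integer> nodes) {
        Map<String, Integer> map = new LinkedHashMap<>(jobs.size());

        Iterator<Integer> iter = nodes.iterator();

        for (String job : jobs) {
            // Recycle iterator: wrap around when all nodes have been used once.
            if (!iter.hasNext())
                iter = nodes.iterator();

            map.put(job, iter.next());
        }

        return map;
    }

    public static void main(String[] args) {
        // Prints {j1=1, j2=2, j3=1, j4=2, j5=1}.
        System.out.println(assign(List.of("j1", "j2", "j3", "j4", "j5"), List.of(1, 2)));
    }
}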
/**
 * Adds entry to future.
 *
 * @param topVer Topology version.
 * @param entry Entry to add.
 * @param dhtNodeId DHT node ID.
 * @return Lock candidate.
 * @throws GridCacheEntryRemovedException If entry was removed.
 */
@Nullable private GridCacheMvccCandidate<K> addEntry(long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
    throws GridCacheEntryRemovedException {
    // Check if lock acquisition has timed out.
    if (timedOut)
        return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c = entry.addNearLocal(dhtNodeId, threadId, lockVer, timeout, !inTx(), inTx(),
        implicitSingleTx());

    if (inTx()) {
        GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

        txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null)
        c.topologyVersion(topVer);

    synchronized (mux) {
        entries.add(entry);
    }

    if (c == null && timeout < 0) {
        if (log.isDebugEnabled())
            log.debug("Failed to acquire lock with negative timeout: " + entry);

        onFailed(false);

        return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
        entry.removeLock(lockVer);

        return null;
    }

    return c;
}