/** * Completeness callback. * * @param success {@code True} if lock was acquired. * @param distribute {@code True} if need to distribute lock removal in case of failure. * @return {@code True} if complete by this operation. */ private boolean onComplete(boolean success, boolean distribute) { if (log.isDebugEnabled()) log.debug( "Received onComplete(..) callback [success=" + success + ", distribute=" + distribute + ", fut=" + this + ']'); if (!success) undoLocks(distribute); if (tx != null) cctx.tm().txContext(tx); if (super.onDone(success, err.get())) { if (log.isDebugEnabled()) log.debug("Completing future: " + this); // Clean up. cctx.mvcc().removeFuture(this); if (timeoutObj != null) cctx.time().removeTimeoutObject(timeoutObj); return true; } return false; }
/** * @param cancel {@code True} to close with cancellation. * @throws GridException If failed. */ @Override public void close(boolean cancel) throws GridException { if (!closed.compareAndSet(false, true)) return; busyLock.block(); if (log.isDebugEnabled()) log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']'); GridException e = null; try { // Assuming that no methods are called on this loader after this method is called. if (cancel) { cancelled = true; for (Buffer buf : bufMappings.values()) buf.cancelAll(); } else doFlush(); ctx.event().removeLocalEventListener(discoLsnr); ctx.io().removeMessageListener(topic); } catch (GridException e0) { e = e0; } fut.onDone(null, e); if (e != null) throw e; }
/** * Creates new HTTP requests handler. * * @param hnd Handler. * @param authChecker Authentication checking closure. * @param log Logger. */ GridJettyRestHandler( GridRestProtocolHandler hnd, GridClosure<String, Boolean> authChecker, GridLogger log) { assert hnd != null; assert log != null; this.hnd = hnd; this.log = log; this.authChecker = authChecker; // Init default page and favicon. try { initDefaultPage(); if (log.isDebugEnabled()) log.debug("Initialized default page."); } catch (IOException e) { U.warn(log, "Failed to initialize default page: " + e.getMessage()); } try { initFavicon(); if (log.isDebugEnabled()) log.debug( favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null."); } catch (IOException e) { U.warn(log, "Failed to initialize favicon: " + e.getMessage()); } }
/** {@inheritDoc} */ @SuppressWarnings({"unchecked", "RedundantTypeArguments"}) @Override public V load(@Nullable GridCacheTx tx, K key) throws GridException { init(); if (log.isDebugEnabled()) log.debug("Store load [key=" + key + ", tx=" + tx + ']'); Session ses = session(tx); try { GridCacheHibernateBlobStoreEntry entry = (GridCacheHibernateBlobStoreEntry) ses.get(GridCacheHibernateBlobStoreEntry.class, toBytes(key)); if (entry == null) return null; return fromBytes(entry.getValue()); } catch (HibernateException e) { rollback(ses, tx); throw new GridException("Failed to load value from cache store with key: " + key, e); } finally { end(ses, tx); } }
/** * Gets Hibernate session. * * @param tx Cache transaction. * @return Session. */ Session session(@Nullable GridCacheTx tx) { Session ses; if (tx != null) { ses = tx.meta(ATTR_SES); if (ses == null) { ses = sesFactory.openSession(); ses.beginTransaction(); // Store session in transaction metadata, so it can be accessed // for other operations on the same transaction. tx.addMeta(ATTR_SES, ses); if (log.isDebugEnabled()) log.debug("Hibernate session open [ses=" + ses + ", tx=" + tx.xid() + "]"); } } else { ses = sesFactory.openSession(); ses.beginTransaction(); } return ses; }
/** {@inheritDoc} */ @Override public void txEnd(GridCacheTx tx, boolean commit) throws GridException { init(); Session ses = tx.removeMeta(ATTR_SES); if (ses != null) { Transaction hTx = ses.getTransaction(); if (hTx != null) { try { if (commit) { ses.flush(); hTx.commit(); } else hTx.rollback(); if (log.isDebugEnabled()) log.debug("Transaction ended [xid=" + tx.xid() + ", commit=" + commit + ']'); } catch (HibernateException e) { throw new GridException( "Failed to end transaction [xid=" + tx.xid() + ", commit=" + commit + ']', e); } finally { ses.close(); } } } }
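/* Illustrative sketch (assumption, not part of the store above): session(tx) and txEnd(tx, commit) bind a single Hibernate Session to the cache transaction and close it when the transaction ends. The snippet below shows the same open/flush/commit-or-rollback/close lifecycle using plain Hibernate API only; the class name and the no-argument configure() call (which expects hibernate.cfg.xml on the classpath) are placeholders. */
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.SessionFactory;
import org.hibernate.Transaction;
import org.hibernate.cfg.Configuration;

public class HibernateSessionLifecycleSketch {
    public static void main(String[] args) {
        // Build a session factory from hibernate.cfg.xml on the classpath (placeholder setup).
        SessionFactory sesFactory = new Configuration().configure().buildSessionFactory();

        // Open a session and begin a Hibernate transaction, as session(tx) does.
        Session ses = sesFactory.openSession();
        ses.beginTransaction();

        boolean commit = true; // Outcome of the enclosing cache transaction.

        // End the Hibernate transaction, as txEnd(tx, commit) does.
        Transaction hTx = ses.getTransaction();

        try {
            if (commit) {
                ses.flush();
                hTx.commit();
            }
            else
                hTx.rollback();
        }
        catch (HibernateException e) {
            throw new RuntimeException("Failed to end Hibernate transaction.", e);
        }
        finally {
            ses.close();
        }
    }
}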
/** {@inheritDoc} */ @Override protected Collection<GridComputeJobAdapter> split(int gridSize, Object arg) throws GridException { assert rsrc1 != null; assert rsrc2 != null; assert rsrc3 != null; assert rsrc4 != null; assert log != null; log.info("Injected shared resource1 into task: " + rsrc1); log.info("Injected shared resource2 into task: " + rsrc2); log.info("Injected shared resource3 into task: " + rsrc3); log.info("Injected shared resource4 into task: " + rsrc4); log.info("Injected log resource into task: " + log); task1Rsrc1 = rsrc1; task1Rsrc2 = rsrc2; task1Rsrc3 = rsrc3; task1Rsrc4 = rsrc4; Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize); for (int i = 0; i < gridSize; i++) { jobs.add(new GridSharedJob1()); } return jobs; }
/** {@inheritDoc} */ @Override public void put(@Nullable GridCacheTx tx, K key, @Nullable V val) throws GridException { init(); if (log.isDebugEnabled()) log.debug("Store put [key=" + key + ", val=" + val + ", tx=" + tx + ']'); if (val == null) { remove(tx, key); return; } Session ses = session(tx); try { GridCacheHibernateBlobStoreEntry entry = new GridCacheHibernateBlobStoreEntry(toBytes(key), toBytes(val)); ses.saveOrUpdate(entry); } catch (HibernateException e) { rollback(ses, tx); throw new GridException( "Failed to put value to cache store [key=" + key + ", val=" + val + ']', e); } finally { end(ses, tx); } }
/** {@inheritDoc} */ @Override public Serializable reduce(List<GridComputeJobResult> results) throws GridException { if (log.isInfoEnabled()) log.info("Reducing job [job=" + this + ", results=" + results + ']'); if (results.size() > 1) fail(); return results.get(0).getData(); }
/** {@inheritDoc} */ @Override protected Collection<? extends GridComputeJob> split(int gridSize, Object arg) throws GridException { if (log.isInfoEnabled()) log.info("Splitting job [job=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']'); Collection<GridComputeJob> jobs = new ArrayList<>(SPLIT_COUNT); for (int i = 1; i <= SPLIT_COUNT; i++) jobs.add(new GridCancelTestJob(i)); return jobs; }
/** {@inheritDoc} */ @Override public Serializable execute() { int arg = this.<Integer>argument(0); try { if (log.isInfoEnabled()) log.info("Executing job [job=" + this + ", arg=" + arg + ']'); startSignal.countDown(); try { if (!startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS)) fail(); if (arg == 1) { if (log.isInfoEnabled()) log.info("Job one is proceeding."); } else Thread.sleep(WAIT_TIME); } catch (InterruptedException e) { if (log.isInfoEnabled()) log.info("Job got cancelled [arg=" + arg + ", ses=" + ses + ", e=" + e + ']'); return 0; } if (log.isInfoEnabled()) log.info("Completing job: " + ses); return argument(0); } finally { stopSignal.countDown(); processedCnt++; } }
/** {@inheritDoc} */ @Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid, String arg) throws GridException { assert locNodeId != null; if (log.isInfoEnabled()) { log.info("Mapping jobs [subgrid=" + subgrid + ", arg=" + arg + ']'); } GridNode remoteNode = null; for (GridNode node : subgrid) { if (!node.id().equals(locNodeId)) { remoteNode = node; } } return Collections.singletonMap( new GridComputeJobAdapter(locNodeId) { /** */ @GridLocalNodeIdResource private UUID nodeId; /** {@inheritDoc} */ @SuppressWarnings("NakedNotify") @Override public Serializable execute() throws GridException { assert nodeId != null; if (!nodeId.equals(argument(0))) { try { synchronized (mux) { mux.notifyAll(); } Thread.sleep(Integer.MAX_VALUE); } catch (InterruptedException e) { throw new GridComputeExecutionRejectedException( "Expected interruption during execution.", e); } } else { return "success"; } throw new GridComputeExecutionRejectedException( "Expected exception during execution."); } }, remoteNode); }
/** @throws GridException If operation failed. */ private void initializeLatch() throws GridException { if (initGuard.compareAndSet(false, true)) { try { internalLatch = CU.outTx( new Callable<CountDownLatch>() { @Override public CountDownLatch call() throws Exception { GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ); try { GridCacheCountDownLatchValue val = latchView.get(key); if (val == null) { if (log.isDebugEnabled()) log.debug("Failed to find count down latch with given name: " + name); assert cnt == 0; return new CountDownLatch(cnt); } tx.commit(); return new CountDownLatch(val.get()); } finally { tx.end(); } } }, ctx); if (log.isDebugEnabled()) log.debug("Initialized internal latch: " + internalLatch); } finally { initLatch.countDown(); } } else { try { initLatch.await(); } catch (InterruptedException ignored) { throw new GridException("Thread has been interrupted."); } if (internalLatch == null) throw new GridException("Internal latch has not been properly initialized."); } }
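/* Illustrative sketch (assumption): initializeLatch() above, and init() in the Hibernate store further below, both rely on the same one-time initialization idiom - an AtomicBoolean guard elects a single initializing thread, a CountDownLatch blocks the rest, and every caller re-checks the result. A minimal self-contained version of that idiom, with illustrative names: */
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class InitGuardSketch {
    private final AtomicBoolean initGuard = new AtomicBoolean();
    private final CountDownLatch initLatch = new CountDownLatch(1);

    private volatile Object resource;

    void init() throws InterruptedException {
        if (initGuard.compareAndSet(false, true)) {
            try {
                // Exactly one thread performs the (possibly failing) initialization.
                resource = new Object();
            }
            finally {
                // Release waiters even if initialization threw an exception.
                initLatch.countDown();
            }
        }
        else
            // Lost the race - wait for the initializing thread to finish.
            initLatch.await();

        if (resource == null)
            throw new IllegalStateException("Resource has not been properly initialized.");
    }
}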
/** * @param nodeId Sender. * @param res Result. */ void onResult(UUID nodeId, GridNearLockResponse<K, V> res) { if (!isDone()) { if (log.isDebugEnabled()) log.debug( "Received lock response from node [nodeId=" + nodeId + ", res=" + res + ", fut=" + this + ']'); for (GridFuture<Boolean> fut : pending()) { if (isMini(fut)) { MiniFuture mini = (MiniFuture) fut; if (mini.futureId().equals(res.miniId())) { assert mini.node().id().equals(nodeId); if (log.isDebugEnabled()) log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']'); mini.onResult(res); if (log.isDebugEnabled()) log.debug( "Future after processed lock response [fut=" + this + ", mini=" + mini + ", res=" + res + ']'); return; } } } U.warn( log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res + ", fut=" + this + ']'); } else if (log.isDebugEnabled()) log.debug( "Ignoring lock response from node (future is done) [nodeId=" + nodeId + ", res=" + res + ", fut=" + this + ']'); }
/** {@inheritDoc} */ @Override public void handle(String target, Request req, HttpServletRequest srvReq, HttpServletResponse res) throws IOException, ServletException { if (log.isDebugEnabled()) log.debug("Handling request [target=" + target + ", req=" + req + ", srvReq=" + srvReq + ']'); if (target.startsWith("/gridgain")) { processRequest(target, srvReq, res); req.setHandled(true); } else if (target.startsWith("/favicon.ico")) { if (favicon == null) { res.setStatus(HttpServletResponse.SC_NOT_FOUND); req.setHandled(true); return; } res.setStatus(HttpServletResponse.SC_OK); res.setContentType("image/x-icon"); res.getOutputStream().write(favicon); res.getOutputStream().flush(); req.setHandled(true); } else { if (dfltPage == null) { res.setStatus(HttpServletResponse.SC_NOT_FOUND); req.setHandled(true); return; } res.setStatus(HttpServletResponse.SC_OK); res.setContentType("text/html"); res.getWriter().write(dfltPage); res.getWriter().flush(); req.setHandled(true); } }
/** * @param cached Entry to check. * @return {@code True} if filter passed. */ private boolean filter(GridCacheEntryEx<K, V> cached) { try { if (!cctx.isAll(cached, filter)) { if (log.isDebugEnabled()) log.debug("Filter didn't pass for entry (will fail lock): " + cached); onFailed(true); return false; } return true; } catch (GridException e) { onError(e); return false; } }
/** * Adds entry to future. * * @param topVer Topology version. * @param entry Entry to add. * @param dhtNodeId DHT node ID. * @return Lock candidate. * @throws GridCacheEntryRemovedException If entry was removed. */ @Nullable private GridCacheMvccCandidate<K> addEntry( long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId) throws GridCacheEntryRemovedException { // Check if lock acquisition is timed out. if (timedOut) return null; // Add local lock first, as it may throw GridCacheEntryRemovedException. GridCacheMvccCandidate<K> c = entry.addNearLocal( dhtNodeId, threadId, lockVer, timeout, !inTx(), inTx(), implicitSingleTx()); if (inTx()) { GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key()); txEntry.cached(entry, txEntry.keyBytes()); } if (c != null) c.topologyVersion(topVer); synchronized (mux) { entries.add(entry); } if (c == null && timeout < 0) { if (log.isDebugEnabled()) log.debug("Failed to acquire lock with negative timeout: " + entry); onFailed(false); return null; } // Double check if lock acquisition has already timed out. if (timedOut) { entry.removeLock(lockVer); return null; } return c; }
/** {@inheritDoc} */ @Override public boolean onDone(Boolean success, Throwable err) { if (log.isDebugEnabled()) log.debug( "Received onDone(..) callback [success=" + success + ", err=" + err + ", fut=" + this + ']'); // If locks were not acquired yet, delay completion. if (isDone() || (err == null && success && !checkLocks())) return false; this.err.compareAndSet(null, err instanceof GridCacheLockTimeoutException ? null : err); if (err != null) success = false; return onComplete(success, true); }
/** {@inheritDoc} */ @SuppressWarnings({"JpaQueryApiInspection", "JpaQlInspection"}) @Override public void remove(@Nullable GridCacheTx tx, K key) throws GridException { init(); if (log.isDebugEnabled()) log.debug("Store remove [key=" + key + ", tx=" + tx + ']'); Session ses = session(tx); try { Object obj = ses.get(GridCacheHibernateBlobStoreEntry.class, toBytes(key)); if (obj != null) ses.delete(obj); } catch (HibernateException e) { rollback(ses, tx); throw new GridException("Failed to remove value from cache store with key: " + key, e); } finally { end(ses, tx); } }
/** * @param nodeId Left node ID. * @return {@code True} if node was in the list. */ @SuppressWarnings({"ThrowableInstanceNeverThrown"}) @Override public boolean onNodeLeft(UUID nodeId) { boolean found = false; for (GridFuture<?> fut : futures()) { if (isMini(fut)) { MiniFuture f = (MiniFuture) fut; if (f.node().id().equals(nodeId)) { if (log.isDebugEnabled()) log.debug( "Found mini-future for left node [nodeId=" + nodeId + ", mini=" + f + ", fut=" + this + ']'); f.onResult(newTopologyException(null, nodeId)); found = true; } } } if (!found) { if (log.isDebugEnabled()) log.debug( "Near lock future does not have mapping for left node (ignoring) [nodeId=" + nodeId + ", fut=" + this + ']'); } return found; }
/** * Checks availability of a classpath resource. * * @param name Resource name. * @return {@code true} if resource is available and ready for read, {@code false} otherwise. */ private boolean resourceAvailable(String name) { InputStream cfgStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(name); if (cfgStream == null) { log.error("Classpath resource not found: " + name); return false; } try { // Read a single byte to force actual content access by JVM. cfgStream.read(); return true; } catch (IOException e) { log.error("Failed to read classpath resource: " + name, e); return false; } finally { U.close(cfgStream, log); } }
/** {@inheritDoc} */ @Override public boolean onDone(@Nullable GridCacheCommittedTxInfo<K, V> res, @Nullable Throwable err) { if (super.onDone(res, err)) { cctx.mvcc().removeFuture(this); if (log.isDebugEnabled()) log.debug( "Completing check committed tx future for transaction [tx=" + tx + ", res=" + res + ", err=" + err + ']'); if (err == null) cctx.tm().finishPessimisticTxOnRecovery(tx, res); else { if (log.isDebugEnabled()) log.debug( "Failed to check prepared transactions, " + "invalidating transaction [err=" + err + ", tx=" + tx + ']'); if (nearCheck) return true; cctx.tm().salvageTx(tx); } return true; } return false; }
/** @return {@code True} if locks have been acquired. */ private boolean checkLocks() { if (!isDone() && initialized() && !hasPending()) { for (int i = 0; i < entries.size(); i++) { while (true) { GridCacheEntryEx<K, V> cached = entries.get(i); try { if (!locked(cached)) { if (log.isDebugEnabled()) log.debug( "Lock is still not acquired for entry (will keep waiting) [entry=" + cached + ", fut=" + this + ']'); return false; } break; } // Possible in concurrent cases, when owner is changed after locks // have been released or cancelled. catch (GridCacheEntryRemovedException ignore) { if (log.isDebugEnabled()) log.debug("Got removed entry in checkLocks method (will retry): " + cached); // Replace old entry with new one. entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(cached.key())); } } } if (log.isDebugEnabled()) log.debug("Local lock acquired for entries [fut=" + this + ", entries=" + entries + "]"); onComplete(true, true); return true; } return false; }
/** * Undoes all locks. * * @param dist If {@code true}, then remove locks from remote nodes as well. */ private void undoLocks(boolean dist) { // Transactions will undo during rollback. if (dist && tx == null) cctx.nearTx().removeLocks(lockVer, keys); else { if (tx != null) { if (tx.setRollbackOnly()) { if (log.isDebugEnabled()) log.debug( "Marked transaction as rollback only because locks could not be acquired: " + tx); } else if (log.isDebugEnabled()) log.debug( "Transaction was not marked rollback-only while locks were not acquired: " + tx); } for (GridCacheEntryEx<K, V> e : entriesCopy()) { try { e.removeLock(lockVer); } catch (GridCacheEntryRemovedException ignored) { while (true) { try { e = cctx.cache().peekEx(e.key()); if (e != null) e.removeLock(lockVer); break; } catch (GridCacheEntryRemovedException ignore) { if (log.isDebugEnabled()) log.debug( "Attempted to remove lock on removed entry (will retry) [ver=" + lockVer + ", entry=" + e + ']'); } } } } } cctx.mvcc().recheckPendingLocks(); }
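/* Illustrative sketch (assumption): checkLocks() and undoLocks() above both use a retry loop for entries that are removed concurrently - catch the removed-entry exception, re-read the entry from the cache, and try again with the new instance. The types below are hypothetical stand-ins, not GridGain API: */
public class RemoveLockRetrySketch {
    /** Hypothetical analogue of GridCacheEntryRemovedException. */
    static class EntryRemovedException extends Exception {}

    /** Hypothetical minimal entry view. */
    interface Entry {
        Object key();

        void removeLock(long ver) throws EntryRemovedException;
    }

    /** Hypothetical minimal cache view. */
    interface Cache {
        Entry peek(Object key); // Current entry for the key, or null if it is gone.
    }

    static void removeLockWithRetry(Cache cache, Entry entry, long ver) {
        while (true) {
            try {
                if (entry != null)
                    entry.removeLock(ver);

                return;
            }
            catch (EntryRemovedException ignored) {
                // Entry was concurrently removed - re-read it and retry with the new instance.
                entry = cache.peek(entry.key());
            }
        }
    }
}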
/** * @param ctx Grid kernal context. * @param cacheName Cache name. * @param flushQ Flush queue. */ public GridDataLoaderImpl( final GridKernalContext ctx, @Nullable final String cacheName, DelayQueue<GridDataLoaderImpl<K, V>> flushQ) { assert ctx != null; this.ctx = ctx; this.cacheName = cacheName; this.flushQ = flushQ; log = U.logger(ctx, logRef, GridDataLoaderImpl.class); discoLsnr = new GridLocalEventListener() { @Override public void onEvent(GridEvent evt) { assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT; GridDiscoveryEvent discoEvt = (GridDiscoveryEvent) evt; UUID id = discoEvt.eventNodeId(); // Remap regular mappings. final Buffer buf = bufMappings.remove(id); if (buf != null) { // Only async notification is possible since // discovery thread may be trapped otherwise. ctx.closure() .callLocalSafe( new Callable<Object>() { @Override public Object call() throws Exception { buf.onNodeLeft(); return null; } }, true /* system pool */); } } }; ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT); // Generate unique topic for this loader. topic = TOPIC_DATALOAD.topic(GridUuid.fromUuid(ctx.localNodeId())); ctx.io() .addMessageListener( topic, new GridMessageListener() { @Override public void onMessage(UUID nodeId, Object msg) { assert msg instanceof GridDataLoadResponse; GridDataLoadResponse res = (GridDataLoadResponse) msg; if (log.isDebugEnabled()) log.debug("Received data load response: " + res); Buffer buf = bufMappings.get(nodeId); if (buf != null) buf.onResponse(res); else if (log.isDebugEnabled()) log.debug("Ignoring response since node has left [nodeId=" + nodeId + ']'); } }); if (log.isDebugEnabled()) log.debug("Added response listener within topic: " + topic); fut = new GridDataLoaderFuture(ctx, this); }
/** * Process HTTP request. * * @param act Action. * @param req HTTP request. * @param res HTTP response. */ private void processRequest(String act, HttpServletRequest req, HttpServletResponse res) { res.setContentType("application/json"); res.setCharacterEncoding("UTF-8"); GridRestCommand cmd = command(req); if (cmd == null) { res.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } if (!authChecker.apply(req.getHeader("X-Signature"))) { res.setStatus(HttpServletResponse.SC_UNAUTHORIZED); return; } GridRestResponse cmdRes; Map<String, Object> params = parameters(req); try { GridRestRequest cmdReq = createRequest(cmd, params, req); if (log.isDebugEnabled()) log.debug("Initialized command request: " + cmdReq); cmdRes = hnd.handle(cmdReq); if (cmdRes == null) throw new IllegalStateException("Received null result from handler: " + hnd); byte[] sesTok = cmdRes.sessionTokenBytes(); if (sesTok != null) cmdRes.setSessionToken(U.byteArray2HexString(sesTok)); res.setStatus(HttpServletResponse.SC_OK); } catch (Exception e) { res.setStatus(HttpServletResponse.SC_OK); U.error(log, "Failed to process HTTP request [action=" + act + ", req=" + req + ']', e); cmdRes = new GridRestResponse(STATUS_FAILED, e.getMessage()); } catch (Throwable e) { U.error(log, "Failed to process HTTP request [action=" + act + ", req=" + req + ']', e); throw e; } JsonConfig cfg = new GridJettyJsonConfig(); // Workaround to avoid an unnecessary transformation of a string into a JSON object. if (cmdRes.getResponse() instanceof String) cfg.registerJsonValueProcessor(cmdRes.getClass(), "response", SKIP_STR_VAL_PROC); if (cmdRes.getResponse() instanceof GridClientTaskResultBean && ((GridClientTaskResultBean) cmdRes.getResponse()).getResult() instanceof String) cfg.registerJsonValueProcessor(cmdRes.getResponse().getClass(), "result", SKIP_STR_VAL_PROC); JSON json; try { json = JSONSerializer.toJSON(cmdRes, cfg); } catch (JSONException e) { U.error(log, "Failed to convert response to JSON: " + cmdRes, e); json = JSONSerializer.toJSON(new GridRestResponse(STATUS_FAILED, e.getMessage()), cfg); } try { if (log.isDebugEnabled()) log.debug("Parsed command response into JSON object: " + json.toString(2)); res.getWriter().write(json.toString()); if (log.isDebugEnabled()) log.debug( "Processed HTTP request [action=" + act + ", jsonRes=" + cmdRes + ", req=" + req + ']'); } catch (IOException e) { U.error(log, "Failed to send HTTP response: " + json.toString(2), e); } }
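/* Illustrative sketch (assumption): the SKIP_STR_VAL_PROC workaround registered above tells json-lib to keep String responses as plain values instead of re-parsing them into JSON objects. A pass-through value processor for json-lib (net.sf.json) might look like the following; the class name is illustrative and the actual SKIP_STR_VAL_PROC implementation may differ. It would be registered via JsonConfig.registerJsonValueProcessor(...) exactly as the handler above does for the "response" and "result" properties. */
import net.sf.json.JsonConfig;
import net.sf.json.processors.JsonValueProcessor;

public class SkipStringValueProcessorSketch implements JsonValueProcessor {
    /** Return the bean property value as-is, so a String is not re-parsed as JSON. */
    @Override public Object processObjectValue(String key, Object val, JsonConfig cfg) {
        return val;
    }

    /** Same pass-through behaviour for array elements. */
    @Override public Object processArrayValue(Object val, JsonConfig cfg) {
        return val;
    }
}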
/** {@inheritDoc} */ @Override protected Collection<GridComputeJobAdapter> split(int gridSize, Object arg) throws GridException { assert rsrc1 != null; assert rsrc2 != null; assert rsrc3 != null; assert rsrc4 != null; assert log != null; log.info("Injected shared resource1 into task: " + rsrc1); log.info("Injected shared resource2 into task: " + rsrc2); log.info("Injected shared resource3 into task: " + rsrc3); log.info("Injected shared resource4 into task: " + rsrc4); log.info("Injected log resource into task: " + log); task2Rsrc1 = rsrc1; task2Rsrc2 = rsrc2; task2Rsrc3 = rsrc3; task2Rsrc4 = rsrc4; Collection<GridComputeJobAdapter> jobs = new ArrayList<>(gridSize); for (int i = 0; i < gridSize; i++) { jobs.add( new GridComputeJobAdapter() { /** User resource. */ @GridUserResource(resourceClass = UserResource1.class) private transient GridAbstractUserResource rsrc5; /** User resource */ @GridUserResource private transient UserResource2 rsrc6; /** User resource. */ @GridUserResource(resourceClass = UserResource1.class, resourceName = "rsrc3") private transient GridAbstractUserResource rsrc7; /** User resource */ @GridUserResource(resourceName = "rsrc4") private transient UserResource2 rsrc8; /** {@inheritDoc} */ @SuppressWarnings({"ObjectEquality"}) @Override public Serializable execute() { assert rsrc1 != null; assert rsrc2 != null; assert rsrc3 != null; assert rsrc4 != null; assert log != null; assert rsrc5 != null; assert rsrc6 != null; assert rsrc7 != null; assert rsrc8 != null; // Make sure that neither task nor global scope got // created more than once. assert rsrc1 == rsrc5; assert rsrc2 == rsrc6; assert rsrc3 == rsrc7; assert rsrc4 == rsrc8; log.info("Injected shared resource1 into job: " + rsrc1); log.info("Injected shared resource2 into job: " + rsrc2); log.info("Injected shared resource3 into job: " + rsrc3); log.info("Injected shared resource4 into job: " + rsrc4); log.info("Injected shared resource5 into job: " + rsrc5); log.info("Injected shared resource6 into job: " + rsrc6); log.info("Injected shared resource7 into job: " + rsrc7); log.info("Injected shared resource8 into job: " + rsrc8); log.info("Injected log resource into job: " + log); return null; } }); } return jobs; }
/** * @param gridName Grid instance name. Can be {@code null}. * @param exec Executor service. * @param parentLog Parent logger. */ static void runBackgroundCheck(String gridName, Executor exec, GridLogger parentLog) { assert exec != null; assert parentLog != null; final GridLogger log = parentLog.getLogger(GridDiagnostic.class); try { exec.execute( new GridWorker(gridName, "grid-diagnostic-1", log) { @Override public void body() { try { InetAddress localHost = U.getLocalHost(); if (!localHost.isReachable(REACH_TIMEOUT)) { U.warn( log, "Default local host is unreachable. This may lead to delays on " + "grid network operations. Check your OS network setting to correct it.", "Default local host is unreachable."); } } catch (IOException ignore) { U.warn( log, "Failed to perform network diagnostics. It is usually caused by serious " + "network configuration problem. Check your OS network setting to correct it.", "Failed to perform network diagnostics."); } } }); exec.execute( new GridWorker(gridName, "grid-diagnostic-2", log) { @Override public void body() { try { InetAddress localHost = U.getLocalHost(); if (localHost.isLoopbackAddress()) { U.warn( log, "Default local host is a loopback address. This can be a sign of " + "potential network configuration problem.", "Default local host is a loopback address."); } } catch (IOException ignore) { U.warn( log, "Failed to perform network diagnostics. It is usually caused by serious " + "network configuration problem. Check your OS network setting to correct it.", "Failed to perform network diagnostics."); } } }); exec.execute( new GridWorker(gridName, "grid-diagnostic-3", log) { @Override public void body() { String jdkStrLow = U.jdkString().toLowerCase(); if (jdkStrLow.contains("jrockit") && jdkStrLow.contains("1.5.")) { U.warn( log, "BEA JRockit VM ver. 1.5.x has shown problems with NIO functionality in our " + "tests that were not reproducible in other VMs. We recommend using Sun VM. Should you " + "have further questions please contact us at [email protected]", "BEA JRockit VM ver. 1.5.x is not supported."); } } }); exec.execute( new GridWorker(gridName, "grid-diagnostic-4", log) { @Override public void body() { // Sufficiently tested OS. if (!U.isSufficientlyTestedOs()) { U.warn( log, "This operating system has been tested less rigorously: " + U.osString() + ". Our team will appreciate the feedback if you experience any problems running " + "gridgain in this environment. You can always send your feedback to [email protected]", "This OS is tested less rigorously: " + U.osString()); } } }); exec.execute( new GridWorker(gridName, "grid-diagnostic-5", log) { @Override public void body() { // Fix for GG-1075. if (U.allLocalMACs() == null) U.warn( log, "No live network interfaces detected. If IP-multicast discovery is used - " + "make sure to add 127.0.0.1 as a local address.", "No live network interfaces. Add 127.0.0.1 as a local address."); } }); exec.execute( new GridWorker(gridName, "grid-diagnostic-6", log) { @Override public void body() { if (System.getProperty("com.sun.management.jmxremote") != null) { String portStr = System.getProperty("com.sun.management.jmxremote.port"); if (portStr != null) try { Integer.parseInt(portStr); return; } catch (NumberFormatException ignore) { } U.warn( log, "JMX remote management is enabled but JMX port is either not set or invalid. " + "Check system property 'com.sun.management.jmxremote.port' to make sure it specifies " + "valid TCP/IP port.", "JMX remote port is invalid - JMX management is off."); } } }); } catch (RejectedExecutionException e) { U.error( log, "Failed to start background network diagnostics check due to thread pool execution " + "rejection. In most cases it indicates a severe configuration problem with GridGain.", "Failed to start background network diagnostics.", e); } }
/** * Initializes store. * * @throws GridException If failed to initialize. */ private void init() throws GridException { if (initGuard.compareAndSet(false, true)) { if (log.isDebugEnabled()) log.debug("Initializing cache store."); try { if (sesFactory != null) // Session factory has been provided - nothing to do. return; if (!F.isEmpty(hibernateCfgPath)) { try { URL url = new URL(hibernateCfgPath); sesFactory = new Configuration().configure(url).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using URL: " + url); // Session factory has been successfully initialized. return; } catch (MalformedURLException e) { if (log.isDebugEnabled()) log.debug("Caught malformed URL exception: " + e.getMessage()); } // Provided path is not a valid URL. File? File cfgFile = new File(hibernateCfgPath); if (cfgFile.exists()) { sesFactory = new Configuration().configure(cfgFile).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using file: " + hibernateCfgPath); // Session factory has been successfully initialized. return; } // Provided path is not a file. Classpath resource? sesFactory = new Configuration().configure(hibernateCfgPath).buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using classpath resource: " + hibernateCfgPath); } else { if (hibernateProps == null) { U.warn( log, "No Hibernate configuration has been provided for store (will use default)."); hibernateProps = new Properties(); hibernateProps.setProperty("hibernate.connection.url", DFLT_CONN_URL); hibernateProps.setProperty("hibernate.show_sql", DFLT_SHOW_SQL); hibernateProps.setProperty("hibernate.hbm2ddl.auto", DFLT_HBM2DDL_AUTO); } Configuration cfg = new Configuration(); cfg.setProperties(hibernateProps); assert resourceAvailable(MAPPING_RESOURCE); cfg.addResource(MAPPING_RESOURCE); sesFactory = cfg.buildSessionFactory(); if (log.isDebugEnabled()) log.debug("Configured session factory using properties: " + hibernateProps); } } catch (HibernateException e) { throw new GridException("Failed to initialize store.", e); } finally { initLatch.countDown(); } } else if (initLatch.getCount() > 0) U.await(initLatch); if (sesFactory == null) throw new GridException("Cache store was not properly initialized."); }
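/* Illustrative sketch (assumption): init() above resolves hibernateCfgPath in three steps - first as a URL, then as a file on disk, and finally as a classpath resource. The same resolution order is shown below using only the standard Hibernate Configuration API; the class and method names other than the Hibernate calls are placeholders. */
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;

import org.hibernate.SessionFactory;
import org.hibernate.cfg.Configuration;

public class HibernateCfgResolutionSketch {
    static SessionFactory buildSessionFactory(String cfgPath) {
        // 1. Try the path as a URL.
        try {
            return new Configuration().configure(new URL(cfgPath)).buildSessionFactory();
        }
        catch (MalformedURLException ignored) {
            // Not a URL - fall through to the other forms.
        }

        // 2. Try the path as a file on disk.
        File cfgFile = new File(cfgPath);

        if (cfgFile.exists())
            return new Configuration().configure(cfgFile).buildSessionFactory();

        // 3. Treat the path as a classpath resource.
        return new Configuration().configure(cfgPath).buildSessionFactory();
    }
}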
/** * Performs flush. * * @throws GridException If failed. */ private void doFlush() throws GridException { lastFlushTime = U.currentTimeMillis(); List<GridFuture> activeFuts0 = null; int doneCnt = 0; for (GridFuture<?> f : activeFuts) { if (!f.isDone()) { if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2)); activeFuts0.add(f); } else { f.get(); doneCnt++; } } if (activeFuts0 == null || activeFuts0.isEmpty()) return; while (true) { Queue<GridFuture<?>> q = null; for (Buffer buf : bufMappings.values()) { GridFuture<?> flushFut = buf.flush(); if (flushFut != null) { if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2); q.add(flushFut); } } if (q != null) { assert !q.isEmpty(); boolean err = false; for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) { try { fut.get(); } catch (GridException e) { if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e); err = true; } } if (err) // Remaps needed - flush buffers. continue; } doneCnt = 0; for (int i = 0; i < activeFuts0.size(); i++) { GridFuture f = activeFuts0.get(i); if (f == null) doneCnt++; else if (f.isDone()) { f.get(); doneCnt++; activeFuts0.set(i, null); } else break; } if (doneCnt == activeFuts0.size()) return; } }