/** @param e Error. */
void onError(Throwable e) {
    tx.commitError(e);

    if (err.compareAndSet(null, e)) {
        boolean marked = tx.setRollbackOnly();

        if (e instanceof GridCacheTxRollbackException) {
            if (marked) {
                try {
                    tx.rollback();
                }
                catch (GridException ex) {
                    U.error(log, "Failed to automatically rollback transaction: " + tx, ex);
                }
            }
        }
        else if (tx.implicit() && tx.isSystemInvalidate()) {
            // Finish implicit transaction on heuristic error.
            try {
                tx.close();
            }
            catch (GridException ex) {
                U.error(log, "Failed to invalidate transaction: " + tx, ex);
            }
        }

        onComplete();
    }
}
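The err.compareAndSet(null, e) guard above is what lets only the first reported error "win" and ensures the completion logic runs exactly once. A minimal, standalone sketch of that idiom with plain JDK types (the FirstErrorRecorder and handle names are illustrative, not GridGain API):

import java.util.concurrent.atomic.AtomicReference;

/** Sketch: remember only the first error and run completion logic exactly once. */
public class FirstErrorRecorder {
    private final AtomicReference<Throwable> err = new AtomicReference<>();

    /** @return {@code true} if this call recorded the first error and triggered completion. */
    public boolean handle(Throwable e) {
        // Only the first caller wins; later errors are ignored here.
        if (err.compareAndSet(null, e)) {
            onComplete(e);

            return true;
        }

        return false;
    }

    private void onComplete(Throwable e) {
        System.out.println("Completed with error: " + e);
    }

    public static void main(String[] args) {
        FirstErrorRecorder r = new FirstErrorRecorder();

        System.out.println(r.handle(new RuntimeException("first")));  // true
        System.out.println(r.handle(new RuntimeException("second"))); // false
    }
}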
/**
 * Message received callback.
 *
 * @param src Sender node ID.
 * @param msg Received message.
 * @return Always {@code true}.
 */
public boolean onMessageReceived(UUID src, GridHadoopMessage msg) {
    if (msg instanceof GridHadoopShuffleMessage) {
        GridHadoopShuffleMessage m = (GridHadoopShuffleMessage) msg;

        try {
            job(m.jobId()).onShuffleMessage(m);
        }
        catch (GridException e) {
            U.error(log, "Message handling failed.", e);
        }

        try {
            // Reply with ack.
            send0(src, new GridHadoopShuffleAck(m.id(), m.jobId()));
        }
        catch (GridException e) {
            U.error(log, "Failed to reply back to shuffle message sender [snd=" + src + ", msg=" + msg + ']', e);
        }
    }
    else if (msg instanceof GridHadoopShuffleAck) {
        GridHadoopShuffleAck m = (GridHadoopShuffleAck) msg;

        try {
            job(m.jobId()).onShuffleAck(m);
        }
        catch (GridException e) {
            U.error(log, "Message handling failed.", e);
        }
    }
    else
        throw new IllegalStateException("Unknown message type received by Hadoop shuffle [src=" + src +
            ", msg=" + msg + ']');

    return true;
}
/**
 * Creates new HTTP requests handler.
 *
 * @param hnd Handler.
 * @param authChecker Authentication checking closure.
 * @param log Logger.
 */
GridJettyRestHandler(GridRestProtocolHandler hnd, GridClosure<String, Boolean> authChecker, GridLogger log) {
    assert hnd != null;
    assert log != null;

    this.hnd = hnd;
    this.log = log;
    this.authChecker = authChecker;

    // Init default page and favicon.
    try {
        initDefaultPage();

        if (log.isDebugEnabled())
            log.debug("Initialized default page.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize default page: " + e.getMessage());
    }

    try {
        initFavicon();

        if (log.isDebugEnabled())
            log.debug(favicon != null ? "Initialized favicon, size: " + favicon.length : "Favicon is null.");
    }
    catch (IOException e) {
        U.warn(log, "Failed to initialize favicon: " + e.getMessage());
    }
}
/** @return Nodes to execute on. */
private Collection<GridNode> nodes() {
    GridCacheMode cacheMode = cctx.config().getCacheMode();

    switch (cacheMode) {
        case LOCAL:
            if (prj != null)
                U.warn(log, "Ignoring query projection because it's executed over LOCAL cache " +
                    "(only local node will be queried): " + this);

            return Collections.singletonList(cctx.localNode());

        case REPLICATED:
            if (prj != null)
                return nodes(cctx, prj);

            GridCacheDistributionMode mode = cctx.config().getDistributionMode();

            return mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED ?
                Collections.singletonList(cctx.localNode()) :
                Collections.singletonList(F.rand(nodes(cctx, null)));

        case PARTITIONED:
            return nodes(cctx, prj);

        default:
            throw new IllegalStateException("Unknown cache mode: " + cacheMode);
    }
}
/**
 * Starts the local node and checks for presence of log file. Also checks that this is really a
 * log of a started node.
 *
 * @param id Test-local node ID.
 * @throws Exception If error occurred.
 */
private void checkOneNode(int id) throws Exception {
    try (Grid grid = G.start(getConfiguration("grid" + id))) {
        String id8 = U.id8(grid.localNode().id());
        String logPath = "work/log/gridgain-" + id8 + ".log";

        File logFile = U.resolveGridGainPath(logPath);

        assertNotNull("Failed to resolve path: " + logPath, logFile);
        assertTrue("Log file does not exist: " + logFile, logFile.exists());

        String logContent = U.readFileToString(logFile.getAbsolutePath(), "UTF-8");

        assertTrue("Log file does not contain its node ID: " + logFile,
            logContent.contains(">>> Local node [ID=" + id8.toUpperCase()));
    }
}
/** Initializes future. */
@SuppressWarnings({"unchecked"})
void finish() {
    if (tx.onePhaseCommit()) {
        // No need to send messages as transaction was already committed on remote node.
        markInitialized();

        return;
    }

    if (mappings != null) {
        finish(mappings.values());

        markInitialized();

        if (!isSync()) {
            boolean complete = true;

            for (GridFuture<?> f : pending())
                // Mini-future in non-sync mode gets done when message gets sent.
                if (isMini(f) && !f.isDone())
                    complete = false;

            if (complete)
                onComplete();
        }
    }
    else {
        assert !commit;

        try {
            tx.rollback();
        }
        catch (GridException e) {
            U.error(log, "Failed to rollback empty transaction: " + tx, e);
        }

        markInitialized();
    }
}
/** {@inheritDoc} */
@Override public boolean onDone(GridCacheTx tx, Throwable err) {
    if ((initialized() || err != null) && super.onDone(tx, err)) {
        if (error() instanceof GridCacheTxHeuristicException) {
            long topVer = this.tx.topologyVersion();

            for (GridCacheTxEntry<K, V> e : this.tx.writeMap().values()) {
                try {
                    if (e.op() != NOOP && !cctx.affinity().localNode(e.key(), topVer)) {
                        GridCacheEntryEx<K, V> cacheEntry = cctx.cache().peekEx(e.key());

                        if (cacheEntry != null)
                            cacheEntry.invalidate(null, this.tx.xidVersion());
                    }
                }
                catch (Throwable t) {
                    U.error(log, "Failed to invalidate entry.", t);

                    if (t instanceof Error)
                        throw (Error) t;
                }
            }
        }

        // Don't forget to clean up.
        cctx.mvcc().removeFuture(this);

        return true;
    }

    return false;
}
/** Stops Jetty. */
private void stopJetty() {
    // Jetty does not really stop the server if port is busy.
    try {
        if (httpSrv != null) {
            // If server was successfully started, deregister ports.
            if (httpSrv.isStarted())
                ctx.ports().deregisterPorts(getClass());

            // Record current interrupted status of calling thread.
            boolean interrupted = Thread.interrupted();

            try {
                httpSrv.stop();
            }
            finally {
                // Reset interrupted flag on calling thread.
                if (interrupted)
                    Thread.currentThread().interrupt();
            }
        }
    }
    catch (InterruptedException ignored) {
        if (log.isDebugEnabled())
            log.debug("Thread has been interrupted.");

        Thread.currentThread().interrupt();
    }
    catch (Exception e) {
        U.error(log, "Failed to stop Jetty HTTP server.", e);
    }
}
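The method above clears and then restores the caller's interrupt status around the blocking stop() call. A minimal sketch of that idiom with plain JDK types (the blockingShutdown name is made up for illustration; this is the general pattern, not Jetty-specific code):

/** Sketch: preserve the caller's interrupt status across a blocking call. */
public class InterruptPreservingStop {
    /** Stands in for a blocking shutdown call that may throw InterruptedException. */
    static void blockingShutdown() throws InterruptedException {
        Thread.sleep(100);
    }

    static void stopQuietly() {
        // Record (and clear) the current interrupted status of the calling thread.
        boolean interrupted = Thread.interrupted();

        try {
            blockingShutdown();
        }
        catch (InterruptedException ignored) {
            // The blocking call itself was interrupted; remember that too.
            interrupted = true;
        }
        finally {
            // Reset the interrupted flag on the calling thread.
            if (interrupted)
                Thread.currentThread().interrupt();
        }
    }

    public static void main(String[] args) {
        Thread.currentThread().interrupt(); // Simulate a pending interrupt.

        stopQuietly();

        System.out.println("Interrupt status preserved: " + Thread.currentThread().isInterrupted());
    }
}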
/**
 * Loads test configuration.
 *
 * @throws Exception If configuration is unavailable or broken.
 */
private void loadTestConfiguration() throws Exception {
    assert TEST_CONFIGURATION_FILE.isFile();

    InputStream in = null;

    Properties p = new Properties();

    try {
        in = new FileInputStream(TEST_CONFIGURATION_FILE);

        p.load(in);
    }
    finally {
        U.closeQuiet(in);
    }

    clientNodes = Integer.parseInt(p.getProperty("client.nodes.count"));
    srvNodes = Integer.parseInt(p.getProperty("server.nodes.count"));
    threadsPerClient = Integer.parseInt(p.getProperty("threads.per.client"));
    cancelRate = Integer.parseInt(p.getProperty("cancel.rate"));
    submitDelay = Long.parseLong(p.getProperty("submit.delay"));

    taskParams = new GridJobLoadTestParams(
        Integer.parseInt(p.getProperty("jobs.count")),
        Integer.parseInt(p.getProperty("jobs.test.duration")),
        Integer.parseInt(p.getProperty("jobs.test.completion.delay")),
        Double.parseDouble(p.getProperty("jobs.failure.probability"))
    );
}
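The same properties-file loading can be written with try-with-resources, which closes the stream without a helper like U.closeQuiet. A self-contained sketch under that assumption (the file name and a subset of keys are placeholders, not the test's actual configuration):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/** Sketch: load a .properties file and parse typed values. */
public class LoadTestConfigExample {
    public static void main(String[] args) throws IOException {
        Properties p = new Properties();

        // try-with-resources closes the stream even if load() fails.
        try (InputStream in = new FileInputStream("job-loadtest.properties")) {
            p.load(in);
        }

        int clientNodes = Integer.parseInt(p.getProperty("client.nodes.count"));
        long submitDelay = Long.parseLong(p.getProperty("submit.delay"));
        double failureProbability = Double.parseDouble(p.getProperty("jobs.failure.probability"));

        System.out.println(clientNodes + " clients, delay " + submitDelay + " ms, p(fail)=" + failureProbability);
    }
}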
/** {@inheritDoc} */
@Override public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds) throws GridSpiException {
    assert !F.isEmpty(nodeIds);

    long now = U.currentTimeMillis();

    Collection<UUID> expired = new LinkedList<>();

    for (UUID id : nodeIds) {
        GridNodeMetrics nodeMetrics = metricsMap.get(id);
        Long ts = tsMap.get(id);

        if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime)
            expired.add(id);
    }

    if (!expired.isEmpty()) {
        Map<UUID, GridNodeMetrics> refreshed = metrics0(expired);

        for (UUID id : refreshed.keySet())
            tsMap.put(id, now);

        metricsMap.putAll(refreshed);
    }

    return F.view(metricsMap, F.contains(nodeIds));
}
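The method above is a read-through cache with a per-entry timestamp: stale or missing entries are collected, refreshed in a single batch call (mirroring metrics0(expired)), and re-stamped. A generic, single-threaded sketch of that pattern with JDK collections (the TtlCache and loadAll names are illustrative, not the SPI's API):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

/** Sketch: batch-refreshing cache where entries expire after a fixed TTL. */
public class TtlCache<K, V> {
    private final Map<K, V> values = new HashMap<>();
    private final Map<K, Long> timestamps = new HashMap<>();
    private final long ttlMs;
    private final Function<List<K>, Map<K, V>> loadAll;

    public TtlCache(long ttlMs, Function<List<K>, Map<K, V>> loadAll) {
        this.ttlMs = ttlMs;
        this.loadAll = loadAll;
    }

    public Map<K, V> getAll(List<K> keys) {
        long now = System.currentTimeMillis();

        // Collect keys that are missing or older than the TTL.
        List<K> expired = new ArrayList<>();

        for (K key : keys) {
            Long ts = timestamps.get(key);

            if (!values.containsKey(key) || ts == null || ts < now - ttlMs)
                expired.add(key);
        }

        // Refresh all stale entries in a single batch and re-stamp them.
        if (!expired.isEmpty()) {
            Map<K, V> refreshed = loadAll.apply(expired);

            for (K key : refreshed.keySet())
                timestamps.put(key, now);

            values.putAll(refreshed);
        }

        // Return only the requested keys, like the filtered view in the SPI method.
        Map<K, V> res = new HashMap<>();

        for (K key : keys)
            if (values.containsKey(key))
                res.put(key, values.get(key));

        return res;
    }
}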
/** @throws IOException If failed. */
private void initFavicon() throws IOException {
    assert favicon == null;

    InputStream in = getClass().getResourceAsStream("favicon.ico");

    if (in != null) {
        BufferedInputStream bis = new BufferedInputStream(in);

        ByteArrayOutputStream bos = new ByteArrayOutputStream();

        try {
            byte[] buf = new byte[2048];

            while (true) {
                int n = bis.read(buf);

                if (n == -1)
                    break;

                bos.write(buf, 0, n);
            }

            favicon = bos.toByteArray();
        }
        finally {
            U.closeQuiet(bis);
        }
    }
}
/** @throws Exception If failed. */
@SuppressWarnings("unchecked")
public void testCancel() throws Exception {
    Grid grid = G.grid(getTestGridName());

    grid.compute().localDeployTask(GridCancelTestTask.class, U.detectClassLoader(GridCancelTestTask.class));

    GridComputeTaskFuture<?> fut = grid.compute().execute(GridCancelTestTask.class.getName(), null);

    // Wait until jobs begin execution.
    boolean await = startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS);

    assert await : "Jobs did not start.";

    info("Test task result: " + fut);

    assert fut != null;

    // Only the first job should successfully complete.
    Object res = fut.get();

    assert (Integer) res == 1;

    // Wait for all jobs to finish.
    await = stopSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS);

    assert await : "Jobs did not stop.";

    // One job is definitely processed, while each of the remaining jobs may be processed, cancelled,
    // or both processed and cancelled. Thus the total count should be at least SPLIT_COUNT and
    // at most (SPLIT_COUNT - 1) * 2 + 1.
    assert (cancelCnt + processedCnt) >= SPLIT_COUNT && (cancelCnt + processedCnt) <= (SPLIT_COUNT - 1) * 2 + 1 :
        "Invalid cancel count value: " + cancelCnt;
}
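To make the bound concrete: for a hypothetical SPLIT_COUNT of 4, one job is counted only as processed, and each of the other three jobs contributes one or two events, so the combined counter must land between 4 and (4 - 1) * 2 + 1 = 7. A tiny check of that arithmetic:

/** Sketch: the processed/cancelled counter bounds for an assumed SPLIT_COUNT of 4. */
public class CancelCountBounds {
    public static void main(String[] args) {
        int splitCnt = 4;

        int min = splitCnt;               // Every job counted exactly once.
        int max = (splitCnt - 1) * 2 + 1; // One processed-only job, the rest possibly counted twice.

        System.out.println("Allowed range: [" + min + ", " + max + "]"); // [4, 7]
    }
}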
/**
 * Method cleans up all events that either outnumber the queue size limit or exceed the time-to-live
 * value. It does nothing if someone else is already cleaning up the queue (the lock is locked) or
 * if there are queue readers (readersNum > 0).
 */
private void cleanupQueue() {
    long now = U.currentTimeMillis();

    long queueOversize = evts.sizex() - expireCnt;

    for (int i = 0; i < queueOversize && evts.sizex() > expireCnt; i++) {
        GridEvent expired = evts.poll();

        if (log.isDebugEnabled())
            log.debug("Event expired by count: " + expired);
    }

    while (true) {
        ConcurrentLinkedDeque8.Node<GridEvent> node = evts.peekx();

        if (node == null) // Queue is empty.
            break;

        GridEvent evt = node.item();

        if (evt == null) // Competing with another thread.
            continue;

        if (now - evt.timestamp() < expireAgeMs)
            break;

        if (evts.unlinkx(node) && log.isDebugEnabled())
            log.debug("Event expired by age: " + node.item());
    }
}
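The cleanup combines two independent eviction rules: drop from the head while the queue is over its size limit, then drop from the head while the oldest entry exceeds the age limit. A single-threaded sketch of those two rules with an ArrayDeque (the concurrent peekx/unlinkx handling above is specific to ConcurrentLinkedDeque8 and is deliberately omitted):

import java.util.ArrayDeque;
import java.util.Deque;

/** Sketch: evict timestamped events first by count, then by age. */
public class EventQueueCleanup {
    static class Event {
        final long timestamp;
        final String name;

        Event(long timestamp, String name) {
            this.timestamp = timestamp;
            this.name = name;
        }
    }

    static void cleanup(Deque<Event> evts, int maxCount, long maxAgeMs) {
        long now = System.currentTimeMillis();

        // Rule 1: drop oldest events while the queue is over its size limit.
        while (evts.size() > maxCount)
            System.out.println("Expired by count: " + evts.poll().name);

        // Rule 2: drop oldest events that exceeded the time-to-live.
        while (!evts.isEmpty() && now - evts.peek().timestamp >= maxAgeMs)
            System.out.println("Expired by age: " + evts.poll().name);
    }

    public static void main(String[] args) {
        Deque<Event> evts = new ArrayDeque<>();

        long now = System.currentTimeMillis();

        evts.add(new Event(now - 10_000, "old"));
        evts.add(new Event(now - 100, "fresh-1"));
        evts.add(new Event(now, "fresh-2"));

        cleanup(evts, 10, 5_000); // Only "old" is evicted (by age).

        System.out.println("Remaining: " + evts.size()); // 2
    }
}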
static {
    try {
        TEST_CONF_DIR = new File(U.resolveGridGainUrl("/modules/core/src/test/config/job-loadtest").toURI());
    }
    catch (URISyntaxException e) {
        throw new RuntimeException("Failed to initialize directory.", e);
    }
}
/**
 * @param nodeId Sender.
 * @param res Result.
 */
void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
        if (log.isDebugEnabled())
            log.debug("Received lock response from node [nodeId=" + nodeId + ", res=" + res + ", fut=" + this + ']');

        for (GridFuture<Boolean> fut : pending()) {
            if (isMini(fut)) {
                MiniFuture mini = (MiniFuture) fut;

                if (mini.futureId().equals(res.miniId())) {
                    assert mini.node().id().equals(nodeId);

                    if (log.isDebugEnabled())
                        log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

                    mini.onResult(res);

                    if (log.isDebugEnabled())
                        log.debug("Future after processed lock response [fut=" + this + ", mini=" + mini +
                            ", res=" + res + ']');

                    return;
                }
            }
        }

        U.warn(log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res +
            ", fut=" + this + ']');
    }
    else if (log.isDebugEnabled())
        log.debug("Ignoring lock response from node (future is done) [nodeId=" + nodeId + ", res=" + res +
            ", fut=" + this + ']');
}
/** @param jobId Job id. */
public void jobFinished(GridHadoopJobId jobId) {
    GridHadoopShuffleJob job = jobs.remove(jobId);

    if (job != null) {
        try {
            job.close();
        }
        catch (GridException e) {
            U.error(log, "Failed to close job: " + jobId, e);
        }
    }
}
/**
 * Checks entry for empty value.
 *
 * @param entry Entry to check.
 * @return {@code True} if entry is empty.
 */
private boolean empty(GridCacheEntry<K, V> entry) {
    try {
        return entry.peek(F.asList(GLOBAL)) == null;
    }
    catch (GridException e) {
        U.error(null, e.getMessage(), e);

        assert false : "Should never happen: " + e;

        return false;
    }
}
/**
 * Stops shuffle.
 *
 * @param cancel Whether to cancel all ongoing activities.
 */
@Override public void stop(boolean cancel) {
    for (GridHadoopShuffleJob job : jobs.values()) {
        try {
            job.close();
        }
        catch (GridException e) {
            U.error(log, "Failed to close job.", e);
        }
    }

    jobs.clear();
}
/**
 * Flushes every internal buffer that was not flushed within the passed-in threshold.
 *
 * <p>Does not wait for the result and does not fail on errors, assuming that this method is
 * called periodically.
 */
@Override public void tryFlush() throws GridInterruptedException {
    if (!busyLock.enterBusy())
        return;

    try {
        for (Buffer buf : bufMappings.values())
            buf.flush();

        lastFlushTime = U.currentTimeMillis();
    }
    finally {
        leaveBusy();
    }
}
/**
 * Reconstructs object on demarshalling.
 *
 * @return Reconstructed object.
 * @throws ObjectStreamException Thrown in case of demarshalling error.
 */
@SuppressWarnings("unchecked")
private Object readResolve() throws ObjectStreamException {
    try {
        GridBiTuple<GridCacheContext, String> t = stash.get();

        return t.get1().dataStructures().atomicReference(t.get2(), null, false);
    }
    catch (GridException e) {
        throw U.withCause(new InvalidObjectException(e.getMessage()), e);
    }
    finally {
        stash.remove();
    }
}
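readResolve() is the standard Java serialization hook for replacing a deserialized placeholder with a canonical live instance; above, the stashed (context, name) pair is resolved back to the shared atomic reference. A minimal, self-contained sketch of the hook itself, where a static registry stands in for the data-structures lookup (NamedCounter and its registry are made-up illustration, not GridGain API):

import java.io.InvalidObjectException;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch: serialize only a name and resolve it back to the shared instance on read. */
public class NamedCounter implements Serializable {
    private static final Map<String, NamedCounter> REGISTRY = new ConcurrentHashMap<>();

    private final String name;
    private transient int value; // Not serialized; lives only in the canonical instance.

    private NamedCounter(String name) {
        this.name = name;
    }

    public static NamedCounter counter(String name) {
        return REGISTRY.computeIfAbsent(name, NamedCounter::new);
    }

    public int increment() {
        return ++value;
    }

    /** Replaces the deserialized copy with the canonical registered instance. */
    private Object readResolve() throws ObjectStreamException {
        NamedCounter existing = REGISTRY.get(name);

        if (existing == null)
            throw new InvalidObjectException("Unknown counter: " + name);

        return existing;
    }
}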
/**
 * Parses HTTP parameters in an appropriate format and returns a map of values for the predefined
 * list of names.
 *
 * @param req Request.
 * @return Map of parsed parameters.
 */
@SuppressWarnings({"unchecked"})
private Map<String, Object> parameters(ServletRequest req) {
    Map<String, String[]> params = req.getParameterMap();

    if (F.isEmpty(params))
        return Collections.emptyMap();

    Map<String, Object> map = U.newHashMap(params.size());

    for (Map.Entry<String, String[]> entry : params.entrySet())
        map.put(entry.getKey(), parameter(entry.getValue()));

    return map;
}
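Servlet containers hand parameters over as Map<String, String[]> because a name may repeat, and the method above collapses each array through its parameter(...) helper. A plain-JDK sketch of that collapsing step; the single-value-or-list rule used here is an assumption for illustration, not necessarily what the handler's own helper does:

import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

/** Sketch: collapse servlet-style String[] parameter values into single objects. */
public class ParameterFlattener {
    static Map<String, Object> flatten(Map<String, String[]> params) {
        Map<String, Object> map = new HashMap<>(params.size());

        for (Map.Entry<String, String[]> entry : params.entrySet())
            map.put(entry.getKey(), collapse(entry.getValue()));

        return map;
    }

    /** A single value stays a String; repeated values become a List. */
    static Object collapse(String[] vals) {
        if (vals == null || vals.length == 0)
            return null;

        return vals.length == 1 ? vals[0] : Arrays.asList(vals);
    }

    public static void main(String[] args) {
        Map<String, String[]> params = new LinkedHashMap<>();

        params.put("cacheName", new String[] {"partitioned"});
        params.put("key", new String[] {"k1", "k2"});

        System.out.println(flatten(params)); // {cacheName=partitioned, key=[k1, k2]}
    }
}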
void cancelAll() {
    GridException err = new GridException("Data loader has been cancelled: " + GridDataLoaderImpl.this);

    for (GridFuture<?> f : locFuts) {
        try {
            f.cancel();
        }
        catch (GridException e) {
            U.error(log, "Failed to cancel mini-future.", e);
        }
    }

    for (GridFutureAdapter<?> f : reqs.values())
        f.onDone(err);
}
/** {@inheritDoc} */
@Override public ClassLoader classLoader() {
    if (ldr == null) {
        ClassLoader ldr0 = deployClass().getClassLoader();

        // Safety.
        if (ldr0 == null)
            ldr0 = U.gridClassLoader();

        assert ldr0 != null : "Failed to detect classloader [objs=" + objs + ']';

        ldr = ldr0;
    }

    return ldr;
}
/** {@inheritDoc} */
@Override public Class<?> deployClass() {
    if (cls == null) {
        Class<?> cls0 = null;

        if (depCls != null)
            cls0 = depCls;
        else {
            for (Iterator<Object> it = objs.iterator(); (cls0 == null || U.isJdk(cls0)) && it.hasNext(); ) {
                Object o = it.next();

                if (o != null)
                    cls0 = U.detectClass(o);
            }

            if (cls0 == null || U.isJdk(cls0))
                cls0 = GridDataLoaderImpl.class;
        }

        assert cls0 != null : "Failed to detect deploy class [objs=" + objs + ']';

        cls = cls0;
    }

    return cls;
}
/**
 * @param cctx Context.
 * @param tx Transaction.
 * @param commit Commit flag.
 */
public GridNearTxFinishFuture(GridCacheContext<K, V> cctx, GridNearTxLocal<K, V> tx, boolean commit) {
    super(cctx.kernalContext(), F.<GridCacheTx>identityReducer(tx));

    this.cctx = cctx;
    this.tx = tx;
    this.commit = commit;

    ignoreInterrupts(true);

    mappings = tx.mappings();

    futId = GridUuid.randomUuid();

    log = U.logger(ctx, logRef, GridNearTxFinishFuture.class);
}
/** @param e Error. */
void onResult(Throwable e) {
    if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
            log.debug("Failed to get future result [fut=" + this + ", err=" + e + ']');

        // Fail.
        onDone(e);
    }
    else
        U.warn(log, "Received error after another result has been processed [fut=" + GridNearLockFuture.this +
            ", mini=" + this + ']', e);
}
/** {@inheritDoc} */
@Override public void isolated(boolean isolated) throws GridException {
    if (isolated())
        return;

    GridNode node = F.first(ctx.grid().forCache(cacheName).nodes());

    if (node == null)
        throw new GridException("Failed to get node for cache: " + cacheName);

    GridCacheAttributes a = U.cacheAttributes(node, cacheName);

    assert a != null;

    updater = a.atomicityMode() == GridCacheAtomicityMode.ATOMIC ?
        GridDataLoadCacheUpdaters.<K, V>batched() :
        GridDataLoadCacheUpdaters.<K, V>groupLocked();
}
/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    nodeId = GridUtils.readUuid(in);
    ver = CU.readVersion(in);

    timeout = in.readLong();
    threadId = in.readLong();
    id = in.readLong();

    short flags = in.readShort();

    mask(OWNER, OWNER.get(flags));
    mask(USED, USED.get(flags));
    mask(TX, TX.get(flags));

    ts = U.currentTimeMillis();
}
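Here several boolean flags arrive packed into a single short and are peeled off with per-flag masks. A self-contained sketch of that packing scheme using plain bit operations (the flag positions below are made up for illustration and need not match the actual wire layout):

/** Sketch: pack several boolean flags into one short and read them back with masks. */
public class FlagMasks {
    // Hypothetical flag positions; each flag owns one bit.
    static final short OWNER = 1 << 0;
    static final short USED  = 1 << 1;
    static final short TX    = 1 << 2;

    static short pack(boolean owner, boolean used, boolean tx) {
        short flags = 0;

        if (owner)
            flags |= OWNER;
        if (used)
            flags |= USED;
        if (tx)
            flags |= TX;

        return flags;
    }

    static boolean get(short flags, short mask) {
        return (flags & mask) != 0;
    }

    public static void main(String[] args) {
        short flags = pack(true, false, true);

        System.out.println(get(flags, OWNER)); // true
        System.out.println(get(flags, USED));  // false
        System.out.println(get(flags, TX));    // true
    }
}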
/**
 * Sends message, optionally either blocking it or throwing an exception if it is of
 * {@link GridJobExecuteResponse} type.
 *
 * @param node Destination node.
 * @param msg Message to be sent.
 * @throws GridSpiException If failed.
 */
private void sendMessage0(GridNode node, GridTcpCommunicationMessageAdapter msg) throws GridSpiException {
    if (msg instanceof GridIoMessage) {
        GridIoMessage msg0 = (GridIoMessage) msg;

        if (msg0.message() instanceof GridJobExecuteResponse) {
            respLatch.countDown();

            if (wait) {
                try {
                    U.await(waitLatch);
                }
                catch (GridInterruptedException ignore) {
                    // No-op.
                }
            }
        }
    }

    if (!block)
        super.sendMessage(node, msg);
}
/** @param res Response. */
void onResponse(GridDataLoadResponse res) {
    if (log.isDebugEnabled())
        log.debug("Received data load response: " + res);

    GridFutureAdapter<?> f = reqs.remove(res.requestId());

    if (f == null) {
        if (log.isDebugEnabled())
            log.debug("Future for request has not been found: " + res.requestId());

        return;
    }

    Throwable err = null;

    byte[] errBytes = res.errorBytes();

    if (errBytes != null) {
        try {
            GridPeerDeployAware jobPda0 = jobPda;

            err = ctx.config().getMarshaller().unmarshal(errBytes,
                jobPda0 != null ? jobPda0.classLoader() : U.gridClassLoader());
        }
        catch (GridException e) {
            f.onDone(null, new GridException("Failed to unmarshal response.", e));

            return;
        }
    }

    f.onDone(null, err);

    if (log.isDebugEnabled())
        log.debug("Finished future [fut=" + f + ", reqId=" + res.requestId() + ", err=" + err + ']');
}