/**
 * Checks that cache is empty.
 *
 * @param cache Cache to check.
 * @throws org.apache.ignite.internal.IgniteInterruptedCheckedException If interrupted while sleeping.
 */
@SuppressWarnings({"ErrorNotRethrown", "TypeMayBeWeakened"})
private void checkEmpty(IgniteCache<String, String> cache) throws IgniteInterruptedCheckedException {
    for (int i = 0; i < 3; i++) {
        try {
            assertTrue(!cache.iterator().hasNext());

            break;
        }
        catch (AssertionError e) {
            if (i == 2)
                throw e;

            info(">>> Cache is not empty, flushing evictions.");

            U.sleep(1000);
        }
    }
}
/**
 * Executes command using {@code shell} channel.
 *
 * @param ses SSH session.
 * @param cmd Command.
 * @throws JSchException In case of SSH error.
 * @throws IOException If IO error occurs.
 * @throws IgniteInterruptedCheckedException If thread was interrupted while waiting.
 */
private void shell(Session ses, String cmd) throws JSchException, IOException, IgniteInterruptedCheckedException {
    ChannelShell ch = null;

    try {
        ch = (ChannelShell)ses.openChannel("shell");

        ch.connect();

        try (PrintStream out = new PrintStream(ch.getOutputStream(), true)) {
            out.println(cmd);

            U.sleep(1000);
        }
    }
    finally {
        if (ch != null && ch.isConnected())
            ch.disconnect();
    }
}
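// A minimal usage sketch for the shell(...) helper above, assuming a JSch session created by the
// caller. The method name, host, credentials and command below are placeholders for illustration;
// only the JSch calls (getSession/setPassword/setConfig/connect/disconnect) are standard
// com.jcraft.jsch API, and the shell(...) helper is the one defined above.
private void shellExample() throws Exception {
    JSch jsch = new JSch();

    Session ses = jsch.getSession("ignite", "example.host", 22);

    ses.setPassword("password");

    // Skip host key verification (acceptable only in a test environment).
    ses.setConfig("StrictHostKeyChecking", "no");

    ses.connect();

    try {
        // Run the command through the shell channel; shell(...) waits a second before closing it.
        shell(ses, "nohup ~/ignite/bin/ignite.sh > ignite.log 2>&1 &");
    }
    finally {
        ses.disconnect();
    }
}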
/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
                in0.read(readBuf);
            }
            finally {
                U.closeQuiet(in0);
            }

            return null;
        }
    }, IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
}
/**
 * Sends cache query response.
 *
 * @param nodeId Node to send response to.
 * @param res Cache query response.
 * @param timeout Message timeout.
 * @return {@code true} if response was sent, {@code false} otherwise.
 */
private boolean sendQueryResponse(UUID nodeId, GridCacheQueryResponse res, long timeout) {
    ClusterNode node = cctx.node(nodeId);

    if (node == null)
        return false;

    int attempt = 1;

    IgniteCheckedException err = null;

    while (!Thread.currentThread().isInterrupted()) {
        try {
            if (log.isDebugEnabled())
                log.debug("Send query response: " + res);

            Object topic = topic(nodeId, res.requestId());

            cctx.io().sendOrderedMessage(
                node,
                topic,
                res,
                cctx.ioPolicy(),
                timeout > 0 ? timeout : Long.MAX_VALUE);

            return true;
        }
        catch (ClusterTopologyCheckedException ignored) {
            if (log.isDebugEnabled())
                log.debug("Failed to send query response since node left grid [nodeId=" + nodeId +
                    ", res=" + res + "]");

            return false;
        }
        catch (IgniteCheckedException e) {
            if (err == null)
                err = e;

            if (Thread.currentThread().isInterrupted())
                break;

            if (attempt < RESEND_ATTEMPTS) {
                if (log.isDebugEnabled())
                    log.debug("Failed to send query response (will try again) [nodeId=" + nodeId +
                        ", res=" + res + ", attempt=" + attempt + ", err=" + e + "]");

                if (!Thread.currentThread().isInterrupted())
                    try {
                        U.sleep(RESEND_FREQ);
                    }
                    catch (IgniteInterruptedCheckedException e1) {
                        U.error(log,
                            "Waiting for query response resending was interrupted (response will not be sent) " +
                            "[nodeId=" + nodeId + ", response=" + res + "]", e1);

                        return false;
                    }
            }
            else {
                U.error(log, "Failed to send cache response [nodeId=" + nodeId + ", response=" + res + "]", err);

                return false;
            }
        }

        attempt++;
    }

    return false;
}