/** {@inheritDoc} */
@Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    jobName = U.readString(in);
    user = U.readString(in);

    hasCombiner = in.readBoolean();
    numReduces = in.readInt();

    props = U.readStringMap(in);
}
/** {@inheritDoc} */
@Override public void writeExternal(ObjectOutput out) throws IOException {
    U.writeString(out, jobName);
    U.writeString(out, user);

    out.writeBoolean(hasCombiner);
    out.writeInt(numReduces);

    U.writeStringMap(out, props);
}
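/*
 * A minimal round-trip sketch for the Externalizable pair above. It assumes the
 * enclosing class is GridHadoopDefaultJobInfo with the public no-arg constructor
 * that Externalizable requires; the helper name roundTrip() is hypothetical.
 * The key contract it demonstrates: readExternal() must consume fields in exactly
 * the order writeExternal() wrote them. Requires java.io.ByteArrayInputStream,
 * ByteArrayOutputStream, ObjectInputStream and ObjectOutputStream.
 */
private static GridHadoopDefaultJobInfo roundTrip(GridHadoopDefaultJobInfo info) throws Exception {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
        info.writeExternal(out); // Or out.writeObject(info) to let serialization invoke it.
    }

    GridHadoopDefaultJobInfo res = new GridHadoopDefaultJobInfo();

    try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
        res.readExternal(in);
    }

    return res;
}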
/** {@inheritDoc} */
@Override public void onDiscoveryDataReceived(UUID nodeId, UUID rmtNodeId, Serializable data) {
    Map<String, Serializable> discData = (Map<String, Serializable>)data;

    if (discData != null) {
        for (Map.Entry<String, Serializable> e : discData.entrySet()) {
            PluginProvider provider = plugins.get(e.getKey());

            if (provider != null)
                provider.receiveDiscoveryData(nodeId, e.getValue());
            else
                U.warn(log, "Received discovery data for unknown plugin: " + e.getKey());
        }
    }
}
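/*
 * A minimal sketch of the provider side of this exchange, assuming the
 * PluginProvider contract pairs receiveDiscoveryData() (invoked by the routing
 * method above) with a provideDiscoveryData() callback queried on the sending
 * node. The class, the provider name "my-plugin" and the payload are illustrative
 * assumptions, not part of this codebase; the remaining PluginProvider methods
 * are omitted for brevity.
 */
private static class MyPluginProvider implements PluginProvider {
    /** Discovery payloads received from remote nodes, keyed by sender node ID. */
    private final Map<UUID, Serializable> rmtData = new ConcurrentHashMap<>();

    /** {@inheritDoc} */
    @Override public String name() {
        return "my-plugin"; // Must match the key under which the processor registered this provider.
    }

    /** {@inheritDoc} */
    @Nullable @Override public Serializable provideDiscoveryData(UUID nodeId) {
        return "hello from my-plugin"; // Shipped to the joining node.
    }

    /** {@inheritDoc} */
    @Override public void receiveDiscoveryData(UUID nodeId, Serializable data) {
        rmtData.put(nodeId, data); // Routed here by onDiscoveryDataReceived() above.
    }

    // Remaining PluginProvider methods omitted for brevity.
}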
/** Print plugins information. */
private void ackPluginsInfo() {
    U.quietAndInfo(log, "Configured plugins:");

    if (plugins.isEmpty()) {
        U.quietAndInfo(log, "  ^-- None");
        U.quietAndInfo(log, "");
    }
    else {
        for (PluginProvider plugin : plugins.values()) {
            U.quietAndInfo(log, "  ^-- " + plugin.name() + " " + plugin.version());
            U.quietAndInfo(log, "      ^-- " + plugin.copyright());
            U.quietAndInfo(log, "");
        }
    }
}
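/*
 * For reference, with a single hypothetical provider reporting name "MyPlugin",
 * version "1.0" and copyright "Copyright (C) ...", the banner produced by the
 * method above would be:
 *
 *   Configured plugins:
 *     ^-- MyPlugin 1.0
 *         ^-- Copyright (C) ...
 *
 * and "  ^-- None" under "Configured plugins:" when the plugin map is empty.
 */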
/**
 * Test how IPC cache map works.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("unchecked")
public void testIpcCache() throws Exception {
    Field cacheField = GridGgfsHadoopIpcIo.class.getDeclaredField("ipcCache");

    cacheField.setAccessible(true);

    Field activeCntField = GridGgfsHadoopIpcIo.class.getDeclaredField("activeCnt");

    activeCntField.setAccessible(true);

    Map<String, GridGgfsHadoopIpcIo> cache = (Map<String, GridGgfsHadoopIpcIo>)cacheField.get(null);

    String name = "ggfs:" + getTestGridName(0) + "@";

    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveGridGainUrl(HADOOP_FS_CFG));
    cfg.setBoolean("fs.ggfs.impl.disable.cache", true);
    cfg.setBoolean(String.format(GridGgfsHadoopUtils.PARAM_GGFS_ENDPOINT_NO_EMBED, name), true);

    // Ensure that existing IO is reused.
    FileSystem fs1 = FileSystem.get(new URI("ggfs://" + name + "/"), cfg);

    assertEquals(1, cache.size());

    GridGgfsHadoopIpcIo io = null;

    System.out.println("CACHE: " + cache);

    // Find the IO instance bound to the default IPC endpoint port (10500).
    for (String key : cache.keySet()) {
        if (key.contains("10500")) {
            io = cache.get(key);

            break;
        }
    }

    assert io != null;

    assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());

    // Ensure that when IO is used by multiple file systems and one of them is closed, IO is not stopped.
    FileSystem fs2 = FileSystem.get(new URI("ggfs://" + name + "/abc"), cfg);

    assertEquals(1, cache.size());
    assertEquals(2, ((AtomicInteger)activeCntField.get(io)).get());

    fs2.close();

    assertEquals(1, cache.size());
    assertEquals(1, ((AtomicInteger)activeCntField.get(io)).get());

    Field stopField = GridGgfsHadoopIpcIo.class.getDeclaredField("stopping");

    stopField.setAccessible(true);

    assert !(Boolean)stopField.get(io);

    // Ensure that IO is stopped when nobody else needs it.
    fs1.close();

    assert cache.isEmpty();
    assert (Boolean)stopField.get(io);
}