/**
 * Checks whether a znode exists at the given path.
 *
 * @param zk    Curator client
 * @param path  znode path (normalized before use)
 * @param watch whether to leave an existence watch on the path
 * @return true if the node exists
 * @throws Exception on ZooKeeper/Curator errors
 */
public boolean existsNode(CuratorFramework zk, String path, boolean watch) throws Exception {
    String npath = PathUtils.normalize_path(path);
    Stat stat =
            watch ? zk.checkExists().watched().forPath(npath) : zk.checkExists().forPath(npath);
    return stat != null;
}
/**
 * Returns the data version of the znode at the given path, or null when the
 * node does not exist.
 *
 * <p>Fix: the original made two round trips (existsNode, then checkExists
 * again) and dereferenced the second Stat without a null check, so a node
 * deleted between the two calls caused a NullPointerException. A single
 * checkExists call is race-free and cheaper.
 *
 * @param zk    Curator client
 * @param path  znode path (normalized before use)
 * @param watch whether to leave a watch on the path
 * @return the node's version, or null if the node is absent
 * @throws Exception on ZooKeeper/Curator errors
 */
public Integer getVersion(CuratorFramework zk, String path, boolean watch) throws Exception {
    String npath = PathUtils.normalize_path(path);
    Stat stat =
            watch ? zk.checkExists().watched().forPath(npath) : zk.checkExists().forPath(npath);
    return stat == null ? null : Integer.valueOf(stat.getVersion());
}
/** * cleanup the topologies which are not in ZK /topology, but in other place * * @param nimbusData * @param active_topologys * @throws Exception */ public void cleanupDisappearedTopology() throws Exception { StormClusterState clusterState = nimbusData.getStormClusterState(); List<String> active_topologys = clusterState.active_storms(); if (active_topologys == null) { return; } Set<String> cleanupIds = get_cleanup_ids(clusterState, active_topologys); for (String topologyId : cleanupIds) { LOG.info("Cleaning up " + topologyId); clusterState.try_remove_storm(topologyId); // nimbusData.getTaskHeartbeatsCache().remove(topologyId); // get /nimbus/stormdist/topologyId String master_stormdist_root = StormConfig.masterStormdistRoot(nimbusData.getConf(), topologyId); try { // delete topologyId local dir PathUtils.rmr(master_stormdist_root); } catch (IOException e) { LOG.warn("Failed to delete " + master_stormdist_root + ",", e); } } }
/**
 * Creates a znode at the given path with the supplied payload and create
 * mode, using the open (unrestricted) ACL.
 *
 * @param zk   Curator client
 * @param path znode path (normalized before use)
 * @param data payload bytes to store at the node
 * @param mode ZooKeeper create mode (persistent, ephemeral, ...)
 * @return the actual path of the created node
 * @throws Exception on ZooKeeper/Curator errors
 */
public String createNode(
        CuratorFramework zk, String path, byte[] data, org.apache.zookeeper.CreateMode mode)
        throws Exception {
    return zk.create()
            .withMode(mode)
            .withACL(ZooDefs.Ids.OPEN_ACL_UNSAFE)
            .forPath(PathUtils.normalize_path(path), data);
}
/**
 * Recursively deletes the znode at the given path together with all of its
 * children, if it exists. (The misspelled method name is kept for API
 * compatibility with existing callers.)
 *
 * @param zk   Curator client
 * @param path znode path (normalized before use)
 * @throws Exception on ZooKeeper/Curator errors
 */
public void deletereRcursive(CuratorFramework zk, String path) throws Exception {
    String npath = PathUtils.normalize_path(path);
    if (!existsNode(zk, npath, false)) {
        return;
    }
    zk.delete().guaranteed().deletingChildrenIfNeeded().forPath(npath);
}
/**
 * Lists the direct children of the znode at the given path.
 *
 * @param zk    Curator client
 * @param path  znode path (normalized before use)
 * @param watch whether to leave a child watch on the path
 * @return the node's child names
 * @throws Exception on ZooKeeper/Curator errors
 */
public List<String> getChildren(CuratorFramework zk, String path, boolean watch)
        throws Exception {
    String npath = PathUtils.normalize_path(path);
    return watch ? zk.getChildren().watched().forPath(npath) : zk.getChildren().forPath(npath);
}
/**
 * Writes an ephemeral znode at the given path, creating parent nodes as
 * needed. If a node already exists at the path, its data is overwritten in
 * place (the node keeps whatever mode it was created with). The local cache,
 * when present, is updated with the same bytes.
 *
 * @param path znode path
 * @param data payload bytes
 * @throws Exception on ZooKeeper/Curator errors
 */
@Override
public void set_ephemeral_node(String path, byte[] data) throws Exception {
    zkobj.mkdirs(zk, PathUtils.parent_path(path));
    boolean alreadyThere = zkobj.exists(zk, path, false);
    if (alreadyThere) {
        zkobj.setData(zk, path, data);
    } else {
        zkobj.createNode(zk, path, data, CreateMode.EPHEMERAL);
    }
    if (zkCache != null) {
        zkCache.put(path, data);
    }
}
public void mkdirs(CuratorFramework zk, String path) throws Exception { String npath = PathUtils.normalize_path(path); // the node is "/" if (npath.equals("/")) { return; } // the node exist if (existsNode(zk, npath, false)) { return; } mkdirs(zk, PathUtils.parent_path(npath)); try { createNode(zk, npath, JStormUtils.barr((byte) 7), org.apache.zookeeper.CreateMode.PERSISTENT); } catch (KeeperException e) {; // this can happen when multiple clients doing mkdir at same // time LOG.warn("zookeeper mkdirs for path" + path, e); } }
public static void testJar(String id) { try { PathUtils.local_mkdirs(pidDir); } catch (IOException e) { LOG.error("Failed to rmr " + pidDir, e); } fillData(); LOG.info("Finish load data"); String pid = JStormUtils.process_pid(); String pidFile = pidDir + File.separator + pid; try { PathUtils.touch(pidFile); } catch (IOException e) { // TODO Auto-generated catch block LOG.error("Failed to touch " + pidFile, e); } try { DataOutputStream raf = new DataOutputStream( new BufferedOutputStream(new FileOutputStream(new File(pidFile), true))); raf.writeBytes(pid); } catch (Exception e) { LOG.error("", e); } while (true) { JStormUtils.sleepMs(1000); LOG.info(id + " is living"); } }
/**
 * Reads the payload of the znode at the given path, or returns null when the
 * node does not exist or a ZooKeeper error occurs (the error is logged).
 *
 * <p>The existence pre-check is kept deliberately: with {@code watch} set it
 * leaves an existence watch even when the node is absent, which a bare
 * getData call could not do.
 *
 * <p>Fix: added the missing space in the error message so the path is
 * readable in logs.
 *
 * @param zk    Curator client
 * @param path  znode path (normalized before use)
 * @param watch whether to leave a watch on the path
 * @return node data, or null when absent or on KeeperException
 * @throws Exception on non-ZooKeeper failures
 */
public byte[] getData(CuratorFramework zk, String path, boolean watch) throws Exception {
    String npath = PathUtils.normalize_path(path);
    try {
        if (existsNode(zk, npath, watch)) {
            return watch ? zk.getData().watched().forPath(npath) : zk.getData().forPath(npath);
        }
    } catch (KeeperException e) {
        // NOTE(review): swallows every KeeperException, not only NoNode — kept for compatibility
        LOG.error("zookeeper getdata for path " + path, e);
    }
    return null;
}
/** * get topology ids which need to be cleanup * * @param clusterState * @return * @throws Exception */ private Set<String> get_cleanup_ids(StormClusterState clusterState, List<String> active_topologys) throws Exception { List<String> task_ids = clusterState.task_storms(); List<String> heartbeat_ids = clusterState.heartbeat_storms(); List<String> error_ids = clusterState.task_error_storms(); List<String> assignment_ids = clusterState.assignments(null); List<String> monitor_ids = clusterState.monitors(); String master_stormdist_root = StormConfig.masterStormdistRoot(nimbusData.getConf()); // listdir /local-dir/nimbus/stormdist List<String> code_ids = PathUtils.read_dir_contents(master_stormdist_root); // Set<String> assigned_ids = // JStormUtils.listToSet(clusterState.active_storms()); Set<String> to_cleanup_ids = new HashSet<String>(); if (task_ids != null) { to_cleanup_ids.addAll(task_ids); } if (heartbeat_ids != null) { to_cleanup_ids.addAll(heartbeat_ids); } if (error_ids != null) { to_cleanup_ids.addAll(error_ids); } if (assignment_ids != null) { to_cleanup_ids.addAll(assignment_ids); } if (monitor_ids != null) { to_cleanup_ids.addAll(monitor_ids); } if (code_ids != null) { to_cleanup_ids.addAll(code_ids); } if (active_topologys != null) { to_cleanup_ids.removeAll(active_topologys); } return to_cleanup_ids; }
/**
 * Writes data to the given znode, creating the node (and its parents) as a
 * persistent node when it does not yet exist. The local cache, when present,
 * is updated with the same bytes.
 *
 * <p>Fix: the size guard now throws {@link IllegalArgumentException} instead
 * of a raw {@code Exception}; callers catching {@code Exception} still catch
 * it, and the failure is correctly classified as a bad argument.
 *
 * @param path znode path
 * @param data payload bytes; must be at most 800 KB
 * @throws IllegalArgumentException if data exceeds 800 KB
 * @throws Exception on ZooKeeper/Curator errors
 */
@Override
public void set_data(String path, byte[] data) throws Exception {
    if (data.length > (JStormUtils.SIZE_1_K * 800)) {
        throw new IllegalArgumentException(
                "Writing 800k+ data into ZK is not allowed!, data size is " + data.length);
    }
    if (zkobj.exists(zk, path, false)) {
        zkobj.setData(zk, path, data);
    } else {
        zkobj.mkdirs(zk, PathUtils.parent_path(path));
        zkobj.createNode(zk, path, data, CreateMode.PERSISTENT);
    }
    if (zkCache != null) {
        zkCache.put(path, data);
    }
}
/**
 * Fetches the data stored at a ZK path in the named cluster and renders it
 * as JSON. Returns null when the node has no data or any error occurs
 * (errors are logged, not rethrown).
 *
 * @param clusterName cluster whose ZooKeeper to query
 * @param path        znode path (normalized before use)
 * @return JSON rendering of the deserialized node data, or null
 */
public static String getZKNodeData(String clusterName, String path) {
    try {
        ClusterState clusterState = getAndCreateClusterState(clusterName);
        if (clusterState == null) {
            throw new IllegalStateException("Cluster state is null");
        }
        byte[] data = clusterState.get_data(PathUtils.normalize_path(path), false);
        if (data == null || data.length == 0) {
            return null;
        }
        Object obj = Utils.maybe_deserialize(data);
        return gson.toJson(obj);
    } catch (Exception e) {
        LOG.error("Get zookeeper data error!", e);
        return null;
    }
}
/**
 * Lists the direct children of a ZK path in the named cluster, wrapping each
 * child as a {@code ZookeeperNode} that records whether the child itself has
 * children. Returns whatever was collected so far (possibly empty) when an
 * error occurs; errors are logged, not rethrown.
 *
 * @param clusterName cluster whose ZooKeeper to query
 * @param parent      parent znode path
 * @return one node per child of parent
 */
public static List<ZookeeperNode> listZKNodes(String clusterName, String parent) {
    List<ZookeeperNode> nodes = new ArrayList<>();
    try {
        ClusterState clusterState = getAndCreateClusterState(clusterName);
        if (clusterState == null) {
            throw new IllegalStateException("Cluster state is null");
        }
        for (String child : clusterState.get_children(parent, false)) {
            String childPath = PathUtils.normalize_path(parent + Cluster.ZK_SEPERATOR + child);
            nodes.add(new ZookeeperNode(parent, child, hasChildren(clusterState, childPath)));
        }
    } catch (Exception e) {
        LOG.error("Get zookeeper info error!", e);
    }
    return nodes;
}
/**
 * Overwrites the data at the given znode (unconditional version).
 *
 * @param zk   Curator client
 * @param path znode path (normalized before use)
 * @param data new payload bytes
 * @return the node's Stat after the write
 * @throws Exception on ZooKeeper/Curator errors
 */
public Stat setData(CuratorFramework zk, String path, byte[] data) throws Exception {
    return zk.setData().forPath(PathUtils.normalize_path(path), data);
}
/**
 * Deletes the znode at the given path. Unlike {@code deletereRcursive}, this
 * performs no existence pre-check and no recursion; failures surface as
 * exceptions from Curator.
 *
 * @param zk   Curator client
 * @param path znode path (normalized before use)
 * @throws Exception on ZooKeeper/Curator errors
 */
public void deleteNode(CuratorFramework zk, String path) throws Exception {
    String npath = PathUtils.normalize_path(path);
    zk.delete().forPath(npath);
}