/** * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink * does not support uploading a jar file before hand. Jar files are always uploaded directly when * a program is submitted. */ public void submitTopologyWithOpts( final String name, final String uploadedJarLocation, final FlinkTopology topology) throws AlreadyAliveException, InvalidTopologyException { if (this.getTopologyJobId(name) != null) { throw new AlreadyAliveException(); } final URI uploadedJarUri; final URL uploadedJarUrl; try { uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI(); uploadedJarUrl = uploadedJarUri.toURL(); JobWithJars.checkJarFile(uploadedJarUrl); } catch (final IOException e) { throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e); } try { FlinkClient.addStormConfigToTopology(topology, conf); } catch (ClassNotFoundException e) { LOG.error("Could not register class for Kryo serialization.", e); throw new InvalidTopologyException("Could not register class for Kryo serialization."); } final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph(); streamGraph.setJobName(name); final JobGraph jobGraph = streamGraph.getJobGraph(); jobGraph.addJar(new Path(uploadedJarUri)); final Configuration configuration = jobGraph.getJobConfiguration(); configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost); configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort); final Client client; try { client = new Client(configuration); } catch (IOException e) { throw new RuntimeException("Could not establish a connection to the job manager", e); } try { ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader( Lists.newArrayList(uploadedJarUrl), Collections.<URL>emptyList(), this.getClass().getClassLoader()); client.runDetached(jobGraph, classLoader); } catch (final ProgramInvocationException e) { throw new RuntimeException("Cannot execute job 
due to ProgramInvocationException", e); } }
/**
 * Submits the given topology to the local Flink cluster in detached mode.
 *
 * @param topologyName name to assign to the resulting Flink job
 * @param conf Storm configuration to expose to the job as global parameters; may be {@code null}
 * @param topology the Flink-translated Storm topology to run
 * @param submitOpts Storm submit options (unused by the Flink-backed implementation)
 * @throws Exception if job submission fails
 */
@SuppressWarnings("rawtypes")
public void submitTopologyWithOpts(
        final String topologyName,
        final Map conf,
        final FlinkTopology topology,
        final SubmitOptions submitOpts)
        throws Exception {
    LOG.info("Running Storm topology on FlinkLocalCluster");

    // Make the Storm configuration available to operators via global job parameters.
    if (conf != null) {
        topology.getConfig().setGlobalJobParameters(new StormConfig(conf));
    }

    final StreamGraph graph = topology.getStreamGraph();
    graph.setJobName(topologyName);

    this.flink.submitJobDetached(graph.getJobGraph());
}
/**
 * Resolves the {@link StreamOperator} backing the given {@link DataStream} by looking up the
 * stream's node in its execution environment's stream graph.
 *
 * @param dataStream the stream whose operator should be retrieved
 * @return the operator registered for the stream's node in the stream graph
 */
private static StreamOperator<?> getOperatorForDataStream(DataStream<?> dataStream) {
    final StreamGraph graph = dataStream.getExecutionEnvironment().getStreamGraph();
    return graph.getStreamNode(dataStream.getId()).getOperator();
}
/**
 * Tests union functionality. This ensures that self-unions and unions of streams with differing
 * parallelism work.
 *
 * @throws Exception
 */
@Test
public void testUnion() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);

    // The map functions below are placeholders: only the resulting graph topology and edge
    // partitioners are inspected, never the (null) map results.
    DataStream<Long> input1 =
            env.generateSequence(0, 0)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            });

    // Union of a stream with itself — both in-edges should use the same (forward) partitioner.
    DataStream<Long> selfUnion =
            input1
                    .union(input1)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            });

    DataStream<Long> input6 =
            env.generateSequence(0, 0)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            });

    // Self-union where one side is broadcast — in-edges should differ in partitioner.
    DataStream<Long> selfUnionDifferentPartition =
            input6
                    .broadcast()
                    .union(input6)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            });

    DataStream<Long> input2 =
            env.generateSequence(0, 0)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            })
                    .setParallelism(4);

    DataStream<Long> input3 =
            env.generateSequence(0, 0)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            })
                    .setParallelism(2);

    // Union of inputs with parallelism 4 and 2 into a parallelism-4 sink: the matching-
    // parallelism input should stay forward, the mismatched one should rebalance.
    DataStream<Long> unionDifferingParallelism =
            input2
                    .union(input3)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            })
                    .setParallelism(4);

    DataStream<Long> input4 =
            env.generateSequence(0, 0)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            })
                    .setParallelism(2);

    DataStream<Long> input5 =
            env.generateSequence(0, 0)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            })
                    .setParallelism(4);

    // Union where one input is explicitly broadcast and the other is not.
    DataStream<Long> unionDifferingPartitioning =
            input4
                    .broadcast()
                    .union(input5)
                    .map(
                            new MapFunction<Long, Long>() {
                                @Override
                                public Long map(Long value) throws Exception {
                                    return null;
                                }
                            })
                    .setParallelism(4);

    StreamGraph streamGraph = env.getStreamGraph();

    // verify self union: two in-edges, both forward
    assertTrue(streamGraph.getStreamNode(selfUnion.getId()).getInEdges().size() == 2);
    for (StreamEdge edge : streamGraph.getStreamNode(selfUnion.getId()).getInEdges()) {
        assertTrue(edge.getPartitioner() instanceof ForwardPartitioner);
    }

    // verify self union with different partitioners: one forward edge and one broadcast edge
    assertTrue(
            streamGraph.getStreamNode(selfUnionDifferentPartition.getId()).getInEdges().size() == 2);
    boolean hasForward = false;
    boolean hasBroadcast = false;
    for (StreamEdge edge :
            streamGraph.getStreamNode(selfUnionDifferentPartition.getId()).getInEdges()) {
        if (edge.getPartitioner() instanceof ForwardPartitioner) {
            hasForward = true;
        }
        if (edge.getPartitioner() instanceof BroadcastPartitioner) {
            hasBroadcast = true;
        }
    }
    assertTrue(hasForward && hasBroadcast);

    // verify union of streams with differing parallelism: forward for the matching input,
    // rebalance for the mismatched one
    assertTrue(
            streamGraph.getStreamNode(unionDifferingParallelism.getId()).getInEdges().size() == 2);
    for (StreamEdge edge :
            streamGraph.getStreamNode(unionDifferingParallelism.getId()).getInEdges()) {
        if (edge.getSourceId() == input2.getId()) {
            assertTrue(edge.getPartitioner() instanceof ForwardPartitioner);
        } else if (edge.getSourceId() == input3.getId()) {
            assertTrue(edge.getPartitioner() instanceof RebalancePartitioner);
        } else {
            fail("Wrong input edge.");
        }
    }

    // verify union of streams with differing partitionings: broadcast edge kept for the
    // broadcast input, forward edge for the plain input
    assertTrue(
            streamGraph.getStreamNode(unionDifferingPartitioning.getId()).getInEdges().size() == 2);
    for (StreamEdge edge :
            streamGraph.getStreamNode(unionDifferingPartitioning.getId()).getInEdges()) {
        if (edge.getSourceId() == input4.getId()) {
            assertTrue(edge.getPartitioner() instanceof BroadcastPartitioner);
        } else if (edge.getSourceId() == input5.getId()) {
            assertTrue(edge.getPartitioner() instanceof ForwardPartitioner);
        } else {
            fail("Wrong input edge.");
        }
    }
}