private static String[] runTests(
      int numHosts, int slotsPerHost, int parallelism, TestLocatableInputSplit[] splits)
      throws Exception {
    // build a job with a single vertex that reads the given input splits
    AbstractJobVertex vertex = new AbstractJobVertex("test vertex");
    vertex.setParallelism(parallelism);
    vertex.setInvokableClass(DummyInvokable.class);
    vertex.setInputSplitSource(new TestInputSplitSource(splits));

    JobGraph jobGraph = new JobGraph("test job", vertex);

    ExecutionGraph eg =
        new ExecutionGraph(
            jobGraph.getJobID(), jobGraph.getName(), jobGraph.getJobConfiguration(), TIMEOUT);
    eg.setQueuedSchedulingAllowed(false);

    eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());

    // schedule the job on a cluster with numHosts hosts and slotsPerHost slots per host
    Scheduler scheduler = getScheduler(numHosts, slotsPerHost);
    eg.scheduleForExecution(scheduler);

    ExecutionVertex[] tasks = eg.getVerticesTopologically().iterator().next().getTaskVertices();
    assertEquals(parallelism, tasks.length);

    // collect the hostname that each subtask was scheduled on
    String[] hostsForTasks = new String[parallelism];
    for (int i = 0; i < parallelism; i++) {
      hostsForTasks[i] = tasks[i].getCurrentAssignedResourceLocation().getHostname();
    }

    return hostsForTasks;
  }
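
  // A hypothetical usage sketch (not part of the original example): it assumes this method
  // lives in the same test class, so the private runTests(...) helper, TestLocatableInputSplit,
  // and the JUnit assertions are all in scope. The host/slot counts and split locations below
  // are illustrative only.
  @Test
  public void testRunTestsHelperSketch() throws Exception {
    TestLocatableInputSplit[] splits =
        new TestLocatableInputSplit[] {
          new TestLocatableInputSplit(1, "host1"),
          new TestLocatableInputSplit(2, "host2"),
          new TestLocatableInputSplit(3, "host3")
        };

    // three hosts with one slot each and parallelism 3: each subtask gets its own slot
    String[] hosts = runTests(3, 1, 3, splits);

    // the helper returns one hostname per subtask, in subtask order
    assertEquals(3, hosts.length);
  }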
Example #2
  /**
   * The parameter {@code uploadedJarLocation} actually points to the local jar file, because Flink
   * does not support uploading a jar file beforehand. Jar files are always uploaded directly when
   * a program is submitted. (A usage sketch follows this method.)
   */
  public void submitTopologyWithOpts(
      final String name, final String uploadedJarLocation, final FlinkTopology topology)
      throws AlreadyAliveException, InvalidTopologyException {

    if (this.getTopologyJobId(name) != null) {
      throw new AlreadyAliveException();
    }

    // resolve the local jar file and verify that it is a valid jar
    final URI uploadedJarUri;
    final URL uploadedJarUrl;
    try {
      uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
      uploadedJarUrl = uploadedJarUri.toURL();
      JobWithJars.checkJarFile(uploadedJarUrl);
    } catch (final IOException e) {
      throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
    }

    try {
      FlinkClient.addStormConfigToTopology(topology, conf);
    } catch (ClassNotFoundException e) {
      LOG.error("Could not register class for Kryo serialization.", e);
      throw new InvalidTopologyException("Could not register class for Kryo serialization.");
    }

    // translate the topology into a Flink JobGraph and attach the jar to it
    final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
    streamGraph.setJobName(name);

    final JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.addJar(new Path(uploadedJarUri));

    // point the job configuration at the configured JobManager
    final Configuration configuration = jobGraph.getJobConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

    final Client client;
    try {
      client = new Client(configuration);
    } catch (IOException e) {
      throw new RuntimeException("Could not establish a connection to the job manager", e);
    }

    try {
      ClassLoader classLoader =
          JobWithJars.buildUserCodeClassLoader(
              Lists.newArrayList(uploadedJarUrl),
              Collections.<URL>emptyList(),
              this.getClass().getClassLoader());
      client.runDetached(jobGraph, classLoader);
    } catch (final ProgramInvocationException e) {
      throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
  }
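
  // A minimal usage sketch for the method above (hypothetical, not part of the original source).
  // The second argument is a plain local path to the topology jar; it is only shipped to the
  // cluster when the job is submitted, as the Javadoc explains. How the FlinkClient and the
  // FlinkTopology are constructed depends on the flink-storm version in use, so both are taken
  // as parameters here, and their imports are omitted for the same reason.
  static void submitLocalJarSketch(FlinkClient client, FlinkTopology topology) throws Exception {
    client.submitTopologyWithOpts(
        "example-topology",                            // must not collide with a running topology
        "/path/to/topology-jar-with-dependencies.jar", // local jar path (hypothetical)
        topology);
  }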
  @Test
  public void testMultipleInstancesPerHost() {

    TestLocatableInputSplit[] splits =
        new TestLocatableInputSplit[] {
          new TestLocatableInputSplit(1, "host1"),
          new TestLocatableInputSplit(2, "host1"),
          new TestLocatableInputSplit(3, "host2"),
          new TestLocatableInputSplit(4, "host2"),
          new TestLocatableInputSplit(5, "host3"),
          new TestLocatableInputSplit(6, "host3")
        };

    try {
      AbstractJobVertex vertex = new AbstractJobVertex("test vertex");
      vertex.setParallelism(6);
      vertex.setInvokableClass(DummyInvokable.class);
      vertex.setInputSplitSource(new TestInputSplitSource(splits));

      JobGraph jobGraph = new JobGraph("test job", vertex);

      ExecutionGraph eg =
          new ExecutionGraph(
              jobGraph.getJobID(), jobGraph.getName(), jobGraph.getJobConfiguration(), TIMEOUT);

      eg.attachJobGraph(jobGraph.getVerticesSortedTopologicallyFromSources());
      eg.setQueuedSchedulingAllowed(false);

      // create a scheduler with 6 instances, two instances on each of three hosts
      Scheduler scheduler = new Scheduler();
      Instance i1 = getInstance(new byte[] {10, 0, 1, 1}, 12345, "host1", 1);
      Instance i2 = getInstance(new byte[] {10, 0, 1, 1}, 12346, "host1", 1);
      Instance i3 = getInstance(new byte[] {10, 0, 1, 2}, 12345, "host2", 1);
      Instance i4 = getInstance(new byte[] {10, 0, 1, 2}, 12346, "host2", 1);
      Instance i5 = getInstance(new byte[] {10, 0, 1, 3}, 12345, "host3", 1);
      Instance i6 = getInstance(new byte[] {10, 0, 1, 3}, 12346, "host3", 1);
      scheduler.newInstanceAvailable(i1);
      scheduler.newInstanceAvailable(i2);
      scheduler.newInstanceAvailable(i3);
      scheduler.newInstanceAvailable(i4);
      scheduler.newInstanceAvailable(i5);
      scheduler.newInstanceAvailable(i6);

      eg.scheduleForExecution(scheduler);

      ExecutionVertex[] tasks = eg.getVerticesTopologically().iterator().next().getTaskVertices();
      assertEquals(6, tasks.length);

      Instance taskInstance1 = tasks[0].getCurrentAssignedResource().getInstance();
      Instance taskInstance2 = tasks[1].getCurrentAssignedResource().getInstance();
      Instance taskInstance3 = tasks[2].getCurrentAssignedResource().getInstance();
      Instance taskInstance4 = tasks[3].getCurrentAssignedResource().getInstance();
      Instance taskInstance5 = tasks[4].getCurrentAssignedResource().getInstance();
      Instance taskInstance6 = tasks[5].getCurrentAssignedResource().getInstance();

      assertTrue(taskInstance1 == i1 || taskInstance1 == i2);
      assertTrue(taskInstance2 == i1 || taskInstance2 == i2);
      assertTrue(taskInstance3 == i3 || taskInstance3 == i4);
      assertTrue(taskInstance4 == i3 || taskInstance4 == i4);
      assertTrue(taskInstance5 == i5 || taskInstance5 == i6);
      assertTrue(taskInstance6 == i5 || taskInstance6 == i6);
    } catch (Exception e) {
      e.printStackTrace();
      fail(e.getMessage());
    }
  }