@Test
 public void testUnexpectedSavepointBackend() throws Exception {
   Configuration config = new Configuration();
   config.setString(SavepointStoreFactory.SAVEPOINT_BACKEND_KEY, "unexpected");
   SavepointStore store = SavepointStoreFactory.createFromConfig(config);
   assertTrue(store.getStateStore() instanceof HeapStateStore);
 }
Example 2
  public void killTopologyWithOpts(final String name, final KillOptions options)
      throws NotAliveException {
    final JobID jobId = this.getTopologyJobId(name);
    if (jobId == null) {
      throw new NotAliveException("Storm topology with name " + name + " not found.");
    }

    if (options != null) {
      try {
        Thread.sleep(1000 * options.get_wait_secs());
      } catch (final InterruptedException e) {
        throw new RuntimeException(e);
      }
    }

    final Configuration configuration = GlobalConfiguration.getConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, this.jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, this.jobManagerPort);

    final Client client;
    try {
      client = new Client(configuration);
    } catch (final IOException e) {
      throw new RuntimeException("Could not establish a connection to the job manager", e);
    }

    try {
      client.cancel(jobId);
    } catch (final Exception e) {
      throw new RuntimeException("Cannot stop job.", e);
    }
  }
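For illustration, a minimal usage sketch of the method above; the client instance (flinkClient), the topology name, and the grace period are assumptions:

  // Hypothetical usage: kill the topology "word-count" after a 5 second grace period.
  // KillOptions is the Storm-generated options class used in the signature above.
  final KillOptions opts = new KillOptions();
  opts.set_wait_secs(5);
  flinkClient.killTopologyWithOpts("word-count", opts);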
 @Test
 public void testSavepointBackendFileSystemButNoDirectory() throws Exception {
   Configuration config = new Configuration();
   config.setString(SavepointStoreFactory.SAVEPOINT_BACKEND_KEY, "filesystem");
   SavepointStore store = SavepointStoreFactory.createFromConfig(config);
   assertTrue(store.getStateStore() instanceof HeapStateStore);
 }
  /**
   * Checks that the local instance manager reads the default instance type correctly from the configuration file.
   */
  @Test
  public void testInstanceTypeFromConfiguration() {

    try {
      Configuration cfg = new Configuration();
      cfg.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "127.0.0.1");
      cfg.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 6123);
      cfg.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 1);
      cfg.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 1);

      GlobalConfiguration.includeConfiguration(cfg);

      // start JobManager
      ExecutionMode executionMode = ExecutionMode.LOCAL;
      JobManager jm = new JobManager(executionMode);

      final TestInstanceListener testInstanceListener = new TestInstanceListener();

      InstanceManager im = jm.getInstanceManager();
      try {
        im.setInstanceListener(testInstanceListener);

      } catch (Exception e) {
        e.printStackTrace();
        Assert.fail("Instantiation of LocalInstanceManager failed: " + e.getMessage());
      } finally {
        jm.shutdown();
      }
    } catch (Exception e) {
      System.err.println(e.getMessage());
      e.printStackTrace();
      Assert.fail("Test caused an error: " + e.getMessage());
    }
  }
    /**
     * All arguments are parsed to a {@link Configuration} and passed to the TaskManager, for
     * instance: <code>
     * --high-availability ZOOKEEPER --high-availability.zookeeper.quorum "xyz:123:456"</code>.
     */
    public static void main(String[] args) throws Exception {
      try {
        Configuration config = ParameterTool.fromArgs(args).getConfiguration();

        if (!config.containsKey(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY)) {
          config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 4);
        }

        if (!config.containsKey(ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY)) {
          config.setInteger(ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY, 100);
        }

        LOG.info("Configuration: {}.", config);

        // Run the TaskManager
        TaskManager.selectNetworkInterfaceAndRunTaskManager(
            config, ResourceID.generate(), TaskManager.class);

        // Run forever
        new CountDownLatch(1).await();
      } catch (Throwable t) {
        LOG.error("Failed to start TaskManager process", t);
        System.exit(1);
      }
    }
  @BeforeClass
  public static void setupJobManager() {
    Configuration config = new Configuration();

    int port = NetUtils.getAvailablePort();

    config.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost");
    config.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, port);

    scala.Option<Tuple2<String, Object>> listeningAddress =
        scala.Option.apply(new Tuple2<String, Object>("localhost", port));
    jobManagerSystem = AkkaUtils.createActorSystem(config, listeningAddress);
    ActorRef jobManagerActorRef =
        JobManager.startJobManagerActors(
                config,
                jobManagerSystem,
                StreamingMode.BATCH_ONLY,
                JobManager.class,
                MemoryArchivist.class)
            ._1();

    try {
      LeaderRetrievalService lrs = LeaderRetrievalUtils.createLeaderRetrievalService(config);

      jmGateway = LeaderRetrievalUtils.retrieveLeaderGateway(lrs, jobManagerSystem, timeout);
    } catch (Exception e) {
      fail("Could not retrieve the JobManager gateway. " + e.getMessage());
    }
  }
Example 7
  private FiniteDuration getTimeout() {
    final Configuration configuration = GlobalConfiguration.getConfiguration();
    if (this.timeout != null) {
      configuration.setString(ConfigConstants.AKKA_ASK_TIMEOUT, this.timeout);
    }

    return AkkaUtils.getClientTimeout(configuration);
  }
Example 8
  private StateBackend<?> createStateBackend() throws Exception {
    StateBackend<?> configuredBackend = configuration.getStateBackend(userClassLoader);

    if (configuredBackend != null) {
      // backend has been configured on the environment
      LOG.info("Using user-defined state backend: " + configuredBackend);
      return configuredBackend;
    } else {
      // see if we have a backend specified in the configuration
      Configuration flinkConfig = getEnvironment().getTaskManagerInfo().getConfiguration();
      String backendName = flinkConfig.getString(ConfigConstants.STATE_BACKEND, null);

      if (backendName == null) {
        LOG.warn(
            "No state backend has been specified, using default state backend (Memory / JobManager)");
        backendName = "jobmanager";
      }

      backendName = backendName.toLowerCase();
      switch (backendName) {
        case "jobmanager":
          LOG.info("State backend is set to heap memory (checkpoint to jobmanager)");
          return MemoryStateBackend.defaultInstance();

        case "filesystem":
          FsStateBackend backend = new FsStateBackendFactory().createFromConfig(flinkConfig);
          LOG.info(
              "State backend is set to heap memory (checkpoints to filesystem \""
                  + backend.getBasePath()
                  + "\")");
          return backend;

        default:
          try {
            @SuppressWarnings("rawtypes")
            Class<? extends StateBackendFactory> clazz =
                Class.forName(backendName, false, userClassLoader)
                    .asSubclass(StateBackendFactory.class);

            return clazz.newInstance().createFromConfig(flinkConfig);
          } catch (ClassNotFoundException e) {
            throw new IllegalConfigurationException(
                "Cannot find configured state backend: " + backendName);
          } catch (ClassCastException e) {
            throw new IllegalConfigurationException(
                "The class configured under '"
                    + ConfigConstants.STATE_BACKEND
                    + "' is not a valid state backend factory ("
                    + backendName
                    + ')');
          } catch (Throwable t) {
            throw new IllegalConfigurationException("Cannot create configured state backend", t);
          }
      }
    }
  }
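To illustrate the default branch above, here is a minimal sketch of a custom factory that could be loaded reflectively through the state.backend configuration key. The class name, the custom key, and the fallback path are assumptions, and the exact generic signature of StateBackendFactory may differ between Flink versions:

  // Hypothetical factory, configured as: state.backend: com.example.MyStateBackendFactory
  public class MyStateBackendFactory implements StateBackendFactory<FsStateBackend> {
    @Override
    public FsStateBackend createFromConfig(Configuration config) throws Exception {
      // Read a custom checkpoint path from the Flink configuration (key is an assumption).
      String path = config.getString("my.checkpoint.path", "file:///tmp/checkpoints");
      return new FsStateBackend(path);
    }
  }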
  @BeforeClass
  public static void startActorSystem() {
    config = new Configuration();
    config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "5 s");
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "200 ms");
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "2 s");
    config.setDouble(ConfigConstants.AKKA_WATCH_THRESHOLD, 2.0);

    actorSystem = AkkaUtils.createLocalActorSystem(config);
  }
  @BeforeClass
  public static void startTestCluster() {
    Configuration config = new Configuration();
    config.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, 2);
    config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, PARALLELISM / 2);
    config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 48);
    config.setString(ConfigConstants.EXECUTION_RETRY_DELAY_KEY, "0 ms");

    cluster = new ForkableFlinkMiniCluster(config, false);
    cluster.start();
  }
  @Test
  public void testSavepointBackendFileSystemButCheckpointBackendJobManager() throws Exception {
    Configuration config = new Configuration();

    // This combination does not make sense, because the checkpoints will be
    // lost after the job manager shuts down.
    config.setString(ConfigConstants.STATE_BACKEND, "jobmanager");
    config.setString(SavepointStoreFactory.SAVEPOINT_BACKEND_KEY, "filesystem");
    SavepointStore store = SavepointStoreFactory.createFromConfig(config);
    assertTrue(store.getStateStore() instanceof HeapStateStore);
  }
  /**
   * Ensure that we will never have the following error.
   *
   * <p>The test works as follows:
   *
   * <ul>
   *   <li>Use the CliFrontend to invoke a jar file that loads a class which is only available in
   *       the jar file itself (via a custom classloader)
   *   <li>Change the usercode classloader of the PackagedProgram to a special classloader for this
   *       test
   *   <li>The classloader will accept the special class (and return a String.class)
   * </ul>
   *
   * <pre>
   * org.apache.flink.client.program.ProgramInvocationException: The main method caused an error.
   *     at org.apache.flink.client.program.PackagedProgram.callMainMethod(PackagedProgram.java:398)
   *     at org.apache.flink.client.program.PackagedProgram.invokeInteractiveModeForExecution(PackagedProgram.java:301)
   *     at org.apache.flink.client.program.Client.getOptimizedPlan(Client.java:140)
   *     at org.apache.flink.client.program.Client.getOptimizedPlanAsJson(Client.java:125)
   *     at org.apache.flink.client.CliFrontend.info(CliFrontend.java:439)
   *     at org.apache.flink.client.CliFrontend.parseParameters(CliFrontend.java:931)
   *     at org.apache.flink.client.CliFrontend.main(CliFrontend.java:951)
   * Caused by: java.io.IOException: java.lang.RuntimeException: java.lang.ClassNotFoundException:
   *             org.apache.hadoop.hive.ql.io.RCFileInputFormat
   *     at org.apache.hcatalog.mapreduce.HCatInputFormat.setInput(HCatInputFormat.java:102)
   *     at org.apache.hcatalog.mapreduce.HCatInputFormat.setInput(HCatInputFormat.java:54)
   *     at tlabs.CDR_In_Report.createHCatInputFormat(CDR_In_Report.java:322)
   *     at tlabs.CDR_Out_Report.main(CDR_Out_Report.java:380)
   *     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
   *     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
   *     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
   *     at java.lang.reflect.Method.invoke(Method.java:622)
   *     at org.apache.flink.client.program.PackagedProgram.callMainMethod(PackagedProgram.java:383)
   * </pre>
   */
  @Test
  public void testPlanWithExternalClass() throws CompilerException, ProgramInvocationException {
    // A one-element array serves as a mutable holder, so the inner classloader can record that
    // it was called.
    final boolean[] callme = {false};
    try {
      String[] parameters = {
        getTestJarPath(), "-c", TEST_JAR_CLASSLOADERTEST_CLASS, "some", "program"
      };
      CommandLine line =
          new PosixParser()
              .parse(CliFrontend.getProgramSpecificOptions(new Options()), parameters, false);

      CliFrontend frontend = new CliFrontend();
      Object result = frontend.buildProgram(line);
      assertTrue(result instanceof PackagedProgram);

      PackagedProgram prog = spy((PackagedProgram) result);

      ClassLoader testClassLoader =
          new ClassLoader(prog.getUserCodeClassLoader()) {
            @Override
            public Class<?> loadClass(String name) throws ClassNotFoundException {
              assertTrue(name.equals("org.apache.hadoop.hive.ql.io.RCFileInputFormat"));
              callme[0] = true;
              return String.class; // Intentionally return the wrong class.
            }
          };
      when(prog.getUserCodeClassLoader()).thenReturn(testClassLoader);

      Assert.assertArrayEquals(new String[] {"some", "program"}, prog.getArguments());
      Assert.assertEquals(TEST_JAR_CLASSLOADERTEST_CLASS, prog.getMainClassName());

      Configuration c = new Configuration();
      c.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "devil");
      Client cli = new Client(c, getClass().getClassLoader());

      cli.getOptimizedPlanAsJson(prog, 666);
    } catch (ProgramInvocationException pie) {
      assertTrue("Classloader was not called", callme[0]);
      // a ClassNotFoundException is expected at some point
      if (!(pie.getCause() instanceof ClassNotFoundException)) {
        System.err.println(pie.getMessage());
        pie.printStackTrace();
        fail("Program caused an exception: " + pie.getMessage());
      }
    } catch (Exception e) {
      assertTrue("Classloader was not called", callme[0]);
      System.err.println(e.getMessage());
      e.printStackTrace();
      fail("Program caused an exception: " + e.getMessage());
    }
  }
Example 13
  /**
   * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink
   * does not support uploading a jar file beforehand. Jar files are always uploaded directly when
   * a program is submitted.
   */
  public void submitTopologyWithOpts(
      final String name, final String uploadedJarLocation, final FlinkTopology topology)
      throws AlreadyAliveException, InvalidTopologyException {

    if (this.getTopologyJobId(name) != null) {
      throw new AlreadyAliveException();
    }

    final URI uploadedJarUri;
    final URL uploadedJarUrl;
    try {
      uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
      uploadedJarUrl = uploadedJarUri.toURL();
      JobWithJars.checkJarFile(uploadedJarUrl);
    } catch (final IOException e) {
      throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
    }

    try {
      FlinkClient.addStormConfigToTopology(topology, conf);
    } catch (ClassNotFoundException e) {
      LOG.error("Could not register class for Kryo serialization.", e);
      throw new InvalidTopologyException("Could not register class for Kryo serialization.");
    }

    final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
    streamGraph.setJobName(name);

    final JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.addJar(new Path(uploadedJarUri));

    final Configuration configuration = jobGraph.getJobConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

    final Client client;
    try {
      client = new Client(configuration);
    } catch (IOException e) {
      throw new RuntimeException("Could not establish a connection to the job manager", e);
    }

    try {
      ClassLoader classLoader =
          JobWithJars.buildUserCodeClassLoader(
              Lists.newArrayList(uploadedJarUrl),
              Collections.<URL>emptyList(),
              this.getClass().getClassLoader());
      client.runDetached(jobGraph, classLoader);
    } catch (final ProgramInvocationException e) {
      throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
  }
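A brief usage sketch of the submission method above; the client instance (flinkClient), the topology name, and the jar path are placeholders:

  // Hypothetical usage: submit a FlinkTopology under the name "word-count".
  // Per the javadoc above, the jar location points to a local file.
  flinkClient.submitTopologyWithOpts("word-count", "/tmp/topology.jar", topology);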
 private void startLocalCluster() {
   synchronized (lock) {
     if (localCluster == null) {
       org.apache.flink.configuration.Configuration configuration =
           new org.apache.flink.configuration.Configuration();
       configuration.setInteger(
           ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, env.getParallelism() * 2);
       localCluster = new LocalFlinkMiniCluster(configuration, false);
       localCluster.start();
     }
     localClusterUsers++;
   }
 }
Example 15
  static Configuration createReporterConfig(Configuration config, TimeUnit timeunit, long period) {
    Configuration reporterConfig = new Configuration();
    reporterConfig.setLong("period", period);
    reporterConfig.setString("timeunit", timeunit.name());

    String[] arguments = config.getString(KEY_METRICS_REPORTER_ARGUMENTS, "").split(" ");
    if (arguments.length > 1) {
      // Arguments come in "--key value" pairs; guard against a trailing key without a value.
      for (int x = 0; x + 1 < arguments.length; x += 2) {
        reporterConfig.setString(arguments[x].replace("--", ""), arguments[x + 1]);
      }
    }
    return reporterConfig;
  }
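For illustration, a sketch of how the helper above behaves, assuming the reporter arguments were configured as --host localhost --port 9999 (values hypothetical):

  Configuration config = new Configuration();
  config.setString(KEY_METRICS_REPORTER_ARGUMENTS, "--host localhost --port 9999");
  Configuration reporterConfig = createReporterConfig(config, TimeUnit.SECONDS, 10L);
  // reporterConfig now holds: period=10, timeunit=SECONDS, host=localhost, port=9999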
Example 16
  /**
   * Package internal method to get a Flink {@link JobID} from a Storm topology name.
   *
   * @param id The Storm topology name.
   * @return Flink's internally used {@link JobID}.
   */
  JobID getTopologyJobId(final String id) {
    final Configuration configuration = GlobalConfiguration.getConfiguration();
    if (this.timeout != null) {
      configuration.setString(ConfigConstants.AKKA_ASK_TIMEOUT, this.timeout);
    }

    try {
      final ActorRef jobManager = this.getJobManager();

      final FiniteDuration askTimeout = this.getTimeout();
      final Future<Object> response =
          Patterns.ask(
              jobManager,
              JobManagerMessages.getRequestRunningJobsStatus(),
              new Timeout(askTimeout));

      Object result;
      try {
        result = Await.result(response, askTimeout);
      } catch (final Exception e) {
        throw new RuntimeException("Could not retrieve running jobs from the JobManager", e);
      }

      if (result instanceof RunningJobsStatus) {
        final List<JobStatusMessage> jobs = ((RunningJobsStatus) result).getStatusMessages();

        for (final JobStatusMessage status : jobs) {
          if (status.getJobName().equals(id)) {
            return status.getJobId();
          }
        }
      } else {
        throw new RuntimeException(
            "RequestRunningJobs requires a response of type "
                + "RunningJobsStatus. Instead the response is of type "
                + result.getClass()
                + ".");
      }
    } catch (final IOException e) {
      throw new RuntimeException(
          "Could not connect to Flink JobManager with address "
              + this.jobManagerHost
              + ":"
              + this.jobManagerPort,
          e);
    }

    return null;
  }
  @Test
  public void testSavepointBackendFileSystem() throws Exception {
    Configuration config = new Configuration();
    String rootPath = System.getProperty("java.io.tmpdir");
    config.setString(ConfigConstants.STATE_BACKEND, "filesystem");
    config.setString(SavepointStoreFactory.SAVEPOINT_BACKEND_KEY, "filesystem");
    config.setString(SavepointStoreFactory.SAVEPOINT_DIRECTORY_KEY, rootPath);

    SavepointStore store = SavepointStoreFactory.createFromConfig(config);
    assertTrue(store.getStateStore() instanceof FileSystemStateStore);

    FileSystemStateStore<CompletedCheckpoint> stateStore =
        (FileSystemStateStore<CompletedCheckpoint>) store.getStateStore();
    assertEquals(new Path(rootPath), stateStore.getRootPath());
  }
Example 18
 private static JMXReporter startJmxReporter(Configuration config) {
   JMXReporter reporter = null;
   try {
     Configuration reporterConfig = new Configuration();
     String portRange = config.getString(KEY_METRICS_JMX_PORT, null);
     if (portRange != null) {
       reporterConfig.setString(KEY_METRICS_JMX_PORT, portRange);
     }
     reporter = new JMXReporter();
     reporter.open(reporterConfig);
   } catch (Exception e) {
     LOG.error("Failed to instantiate JMX reporter.", e);
   }
   // Do not return from a finally block: that would silently swallow any error thrown above.
   return reporter;
 }
Example 19
  @BeforeClass
  public static void startCluster() {
    try {
      Configuration config = new Configuration();
      config.setInteger(
          ConfigConstants.LOCAL_INSTANCE_MANAGER_NUMBER_TASK_MANAGER, NUM_TASK_MANAGERS);
      config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, NUM_TASK_SLOTS);
      config.setString(ConfigConstants.DEFAULT_EXECUTION_RETRY_DELAY_KEY, "0 ms");
      config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 12);

      cluster = new ForkableFlinkMiniCluster(config, false);
    } catch (Exception e) {
      e.printStackTrace();
      fail("Failed to start test cluster: " + e.getMessage());
    }
  }
  /**
   * Creates the props needed to instantiate this actor.
   *
   * <p>Rather than extracting and validating parameters in the constructor, this factory method
   * takes care of that. That way, errors occur synchronously, and are not swallowed simply in a
   * failed asynchronous attempt to start the actor.
   *
   * @param actorClass The actor class, to allow overriding this actor with subclasses for testing.
   * @param flinkConfig The Flink configuration object.
   * @param mesosConfig The Mesos configuration object.
   * @param workerStore The store for persisted Mesos worker state.
   * @param leaderRetrievalService The service for retrieving the current JobManager leader.
   * @param taskManagerParameters The parameters for launching TaskManager containers.
   * @param taskManagerLaunchContext The parameters for launching the TaskManager processes in the
   *     TaskManager containers.
   * @param numInitialTaskManagers The initial number of TaskManagers to allocate.
   * @param log The logger to log to.
   * @return The Props object to instantiate the MesosFlinkResourceManager actor.
   */
  public static Props createActorProps(
      Class<? extends MesosFlinkResourceManager> actorClass,
      Configuration flinkConfig,
      MesosConfiguration mesosConfig,
      MesosWorkerStore workerStore,
      LeaderRetrievalService leaderRetrievalService,
      MesosTaskManagerParameters taskManagerParameters,
      Protos.TaskInfo.Builder taskManagerLaunchContext,
      int numInitialTaskManagers,
      Logger log) {
    final int maxFailedTasks =
        flinkConfig.getInteger(ConfigConstants.MESOS_MAX_FAILED_TASKS, numInitialTaskManagers);
    if (maxFailedTasks >= 0) {
      log.info("Mesos framework tolerates {} failed tasks before giving up", maxFailedTasks);
    }

    return Props.create(
        actorClass,
        flinkConfig,
        mesosConfig,
        workerStore,
        leaderRetrievalService,
        taskManagerParameters,
        taskManagerLaunchContext,
        maxFailedTasks,
        numInitialTaskManagers);
  }
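A minimal usage sketch of the factory method above; the actor system, the collaborator objects, the initial TaskManager count, and the actor name are assumptions:

  // Hypothetical usage: build the Props synchronously, then start the actor.
  Props props =
      MesosFlinkResourceManager.createActorProps(
          MesosFlinkResourceManager.class,
          flinkConfig,
          mesosConfig,
          workerStore,
          leaderRetrievalService,
          taskManagerParameters,
          taskManagerLaunchContext,
          4, // initial number of TaskManagers (an assumed value)
          LoggerFactory.getLogger(MesosFlinkResourceManager.class));
  ActorRef resourceManager = actorSystem.actorOf(props, "Mesos_Resource_Master");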
Example 21
  @Override
  public void open() {
    out = new ByteArrayOutputStream();
    flinkConf = new org.apache.flink.configuration.Configuration();
    Properties intpProperty = getProperty();
    for (Object k : intpProperty.keySet()) {
      String key = (String) k;
      String val = toString(intpProperty.get(key));
      flinkConf.setString(key, val);
    }

    if (localMode()) {
      startFlinkMiniCluster();
    }

    flinkIloop = new FlinkILoop(getHost(), getPort(), (BufferedReader) null, new PrintWriter(out));
    flinkIloop.settings_$eq(createSettings());
    flinkIloop.createInterpreter();

    imain = flinkIloop.intp();

    // prepare bindings
    imain.interpret("@transient var _binder = new java.util.HashMap[String, Object]()");
    binder = (Map<String, Object>) getValue("_binder");

    // import libraries
    imain.interpret("import scala.tools.nsc.io._");
    imain.interpret("import Properties.userHome");
    imain.interpret("import scala.compat.Platform.EOL");

    imain.interpret("import org.apache.flink.api.scala._");
    imain.interpret("import org.apache.flink.api.common.functions._");
    imain.bindValue("env", flinkIloop.scalaEnv());
  }
  @Before
  public void before() throws Exception {
    Tasks.BlockingOnceReceiver$.MODULE$.blocking_$eq(true);

    configuration = new Configuration();

    configuration.setInteger(ConfigConstants.LOCAL_NUMBER_JOB_MANAGER, numJMs);
    configuration.setInteger(ConfigConstants.LOCAL_NUMBER_TASK_MANAGER, numTMs);
    configuration.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, numSlotsPerTM);

    cluster =
        new LeaderElectionRetrievalTestingCluster(
            configuration, true, false, StreamingMode.BATCH_ONLY);
    cluster.start(false); // TaskManagers don't have to register at the JobManager

    cluster.waitForActorsToBeAlive(); // we only wait until all actors are alive
  }
  @Override
  public void setInput(
      Map<Operator<?>, OptimizerNode> contractToNode, ExecutionMode defaultExchangeMode)
      throws CompilerException {
    // see if an internal hint dictates the strategy to use
    final Configuration conf = getOperator().getParameters();
    final String shipStrategy = conf.getString(Optimizer.HINT_SHIP_STRATEGY, null);
    final ShipStrategyType preSet;

    if (shipStrategy != null) {
      if (shipStrategy.equalsIgnoreCase(Optimizer.HINT_SHIP_STRATEGY_REPARTITION_HASH)) {
        preSet = ShipStrategyType.PARTITION_HASH;
      } else if (shipStrategy.equalsIgnoreCase(Optimizer.HINT_SHIP_STRATEGY_REPARTITION_RANGE)) {
        preSet = ShipStrategyType.PARTITION_RANGE;
      } else if (shipStrategy.equalsIgnoreCase(Optimizer.HINT_SHIP_STRATEGY_FORWARD)) {
        preSet = ShipStrategyType.FORWARD;
      } else if (shipStrategy.equalsIgnoreCase(Optimizer.HINT_SHIP_STRATEGY_REPARTITION)) {
        preSet = ShipStrategyType.PARTITION_RANDOM;
      } else {
        throw new CompilerException("Unrecognized ship strategy hint: " + shipStrategy);
      }
    } else {
      preSet = null;
    }

    // get the predecessor node
    Operator<?> children = ((SingleInputOperator<?, ?, ?>) getOperator()).getInput();

    OptimizerNode pred;
    DagConnection conn;
    if (children == null) {
      throw new CompilerException(
          "Error: Node for '" + getOperator().getName() + "' has no input.");
    } else {
      pred = contractToNode.get(children);
      conn = new DagConnection(pred, this, defaultExchangeMode);
      if (preSet != null) {
        conn.setShipStrategy(preSet);
      }
    }

    // create the connection and add it
    setIncomingConnection(conn);
    pred.addOutgoingConnection(conn);
  }
Example 24
  @Test
  public void testPassingConfigurationObject() throws Exception {
    /*
     * Test passing configuration object.
     */

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.getSmall3TupleDataSet(env);
    Configuration conf = new Configuration();
    conf.setInteger(testKey, testValue);
    DataSet<Tuple3<Integer, Long, String>> bcMapDs = ds.map(new RichMapper2()).withParameters(conf);
    List<Tuple3<Integer, Long, String>> result = bcMapDs.collect();

    String expected = "1,1,Hi\n" + "2,2,Hello\n" + "3,2,Hello world";

    compareResultAsTuples(result, expected);
  }
Example 25
  /**
   * Calculates the heap size (in megabytes) for a container, i.e. the requested memory minus the
   * cutoff, where the cutoff is the configured ratio of the memory, but at least the configured
   * minimum cutoff.
   */
  public static int calculateHeapSize(
      int memory, org.apache.flink.configuration.Configuration conf) {

    BootstrapTools.substituteDeprecatedConfigKey(
        conf,
        ConfigConstants.YARN_HEAP_CUTOFF_RATIO,
        ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_RATIO);
    BootstrapTools.substituteDeprecatedConfigKey(
        conf, ConfigConstants.YARN_HEAP_CUTOFF_MIN, ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_MIN);

    float memoryCutoffRatio =
        conf.getFloat(
            ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_RATIO,
            ConfigConstants.DEFAULT_YARN_HEAP_CUTOFF_RATIO);
    int minCutoff =
        conf.getInteger(
            ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_MIN,
            ConfigConstants.DEFAULT_YARN_HEAP_CUTOFF);

    if (memoryCutoffRatio > 1 || memoryCutoffRatio < 0) {
      throw new IllegalArgumentException(
          "The configuration value '"
              + ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_RATIO
              + "' must be between 0 and 1. Value given="
              + memoryCutoffRatio);
    }
    if (minCutoff > memory) {
      throw new IllegalArgumentException(
          "The configuration value '"
              + ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_MIN
              + "' is higher ("
              + minCutoff
              + ") than the requested amount of memory "
              + memory);
    }

    int heapLimit = (int) ((float) memory * memoryCutoffRatio);
    if (heapLimit < minCutoff) {
      heapLimit = minCutoff;
    }
    return memory - heapLimit;
  }
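A worked example of the arithmetic above, with assumed values: for 1024 MB of container memory, a cutoff ratio of 0.25 and a minimum cutoff of 384 MB, the cutoff is max(1024 * 0.25, 384) = 384 MB, so 640 MB remain for the heap:

  org.apache.flink.configuration.Configuration conf =
      new org.apache.flink.configuration.Configuration();
  conf.setFloat(ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_RATIO, 0.25f);
  conf.setInteger(ConfigConstants.CONTAINERIZED_HEAP_CUTOFF_MIN, 384);
  int heapSize = calculateHeapSize(1024, conf); // 1024 - max(256, 384) = 640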
Example 26
  /**
   * Registers a newly incoming runtime task with the task manager.
   *
   * @param id the ID of the task to register
   * @param jobConfiguration the job configuration that has been attached to the original job graph
   * @param environment the environment of the task to be registered
   * @return the task to be started or <code>null</code> if a task with the same ID was already
   *     running
   */
  private Task createAndRegisterTask(
      final ExecutionVertexID id,
      final Configuration jobConfiguration,
      final RuntimeEnvironment environment)
      throws InsufficientResourcesException, IOException {

    if (id == null) {
      throw new IllegalArgumentException("Argument id is null");
    }

    if (environment == null) {
      throw new IllegalArgumentException("Argument environment is null");
    }

    // Task creation and registration must be atomic
    Task task;

    synchronized (this) {
      final Task runningTask = this.runningTasks.get(id);
      boolean registerTask = true;
      if (runningTask == null) {
        task = new Task(id, environment, this);
      } else {

        if (runningTask instanceof Task) {
          // Task is already running
          return null;
        } else {
          // There is already a replay task running, we will simply restart it
          task = runningTask;
          registerTask = false;
        }
      }

      if (registerTask) {
        // Register the task with the byte buffered channel manager
        this.channelManager.register(task);

        boolean enableProfiling = false;
        if (this.profiler != null
            && jobConfiguration.getBoolean(ProfilingUtils.PROFILE_JOB_KEY, true)) {
          enableProfiling = true;
        }

        // Register environment, input, and output gates for profiling
        if (enableProfiling) {
          task.registerProfiler(this.profiler, jobConfiguration);
        }

        this.runningTasks.put(id, task);
      }
    }
    return task;
  }
Example 27
 /**
  * Method to extract environment variables from the flinkConfiguration based on the given prefix
  * String.
  *
  * @param envPrefix Prefix for the environment variables key
  * @param flinkConfiguration The Flink config to get the environment variable definition from
  */
 public static Map<String, String> getEnvironmentVariables(
     String envPrefix, org.apache.flink.configuration.Configuration flinkConfiguration) {
   Map<String, String> result = new HashMap<>();
   for (Map.Entry<String, String> entry : flinkConfiguration.toMap().entrySet()) {
     if (entry.getKey().startsWith(envPrefix) && entry.getKey().length() > envPrefix.length()) {
       // remove prefix
       String key = entry.getKey().substring(envPrefix.length());
       result.put(key, entry.getValue());
     }
   }
   return result;
 }
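A short usage sketch of the helper above; the prefix and the environment variable are assumptions:

  org.apache.flink.configuration.Configuration flinkConf =
      new org.apache.flink.configuration.Configuration();
  // Hypothetical entry: everything after the prefix becomes the variable name.
  flinkConf.setString("containerized.master.env.JAVA_HOME", "/opt/jdk");
  Map<String, String> env = getEnvironmentVariables("containerized.master.env.", flinkConf);
  // env now contains exactly one entry: JAVA_HOME -> /opt/jdk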
  private static StreamTask<?, ?> createMockTask() {
    Configuration configuration = new Configuration();
    configuration.setString(ConfigConstants.STATE_BACKEND, "jobmanager");

    StreamTask<?, ?> task = mock(StreamTask.class);
    when(task.getAccumulatorMap()).thenReturn(new HashMap<String, Accumulator<?, ?>>());
    when(task.getName()).thenReturn("Test task name");
    when(task.getExecutionConfig()).thenReturn(new ExecutionConfig());

    final TaskManagerRuntimeInfo mockTaskManagerRuntimeInfo = mock(TaskManagerRuntimeInfo.class);
    when(mockTaskManagerRuntimeInfo.getConfiguration()).thenReturn(configuration);

    final Environment env = mock(Environment.class);
    when(env.getTaskInfo()).thenReturn(new TaskInfo("Test task name", 1, 0, 1, 0));
    when(env.getUserClassLoader())
        .thenReturn(AggregatingAlignedProcessingTimeWindowOperatorTest.class.getClassLoader());
    when(env.getMetricGroup()).thenReturn(new UnregisteredTaskMetricsGroup());

    when(task.getEnvironment()).thenReturn(env);
    return task;
  }
  @Override
  public void open() {
    out = new ByteArrayOutputStream();
    flinkConf = new org.apache.flink.configuration.Configuration();
    Properties intpProperty = getProperty();
    for (Object k : intpProperty.keySet()) {
      String key = (String) k;
      String val = toString(intpProperty.get(key));
      flinkConf.setString(key, val);
    }

    if (localMode()) {
      startFlinkMiniCluster();
    }

    flinkIloop =
        new FlinkILoop(
            getHost(), getPort(), flinkConf, (BufferedReader) null, new PrintWriter(out));

    flinkIloop.settings_$eq(createSettings());
    flinkIloop.createInterpreter();

    imain = flinkIloop.intp();

    org.apache.flink.api.scala.ExecutionEnvironment benv = flinkIloop.scalaBenv();
    // new ExecutionEnvironment(remoteBenv)
    org.apache.flink.streaming.api.scala.StreamExecutionEnvironment senv = flinkIloop.scalaSenv();

    senv.getConfig().disableSysoutLogging();
    benv.getConfig().disableSysoutLogging();

    // prepare bindings
    imain.interpret("@transient var _binder = new java.util.HashMap[String, Object]()");
    Map<String, Object> binder = (Map<String, Object>) getLastObject();

    // import libraries
    imain.interpret("import scala.tools.nsc.io._");
    imain.interpret("import Properties.userHome");
    imain.interpret("import scala.compat.Platform.EOL");

    imain.interpret("import org.apache.flink.api.scala._");
    imain.interpret("import org.apache.flink.api.common.functions._");

    binder.put("benv", benv);
    imain.interpret(
        "val benv = _binder.get(\"benv\").asInstanceOf[" + benv.getClass().getName() + "]");

    binder.put("senv", senv);
    imain.interpret(
        "val senv = _binder.get(\"senv\").asInstanceOf[" + senv.getClass().getName() + "]");
  }
  private Plan getTestPlanRightStatic(String strategy) {

    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSet<Tuple3<Long, Long, Long>> bigInput =
        env.readCsvFile("file://bigFile").types(Long.class, Long.class, Long.class).name("bigFile");

    DataSet<Tuple3<Long, Long, Long>> smallInput =
        env.readCsvFile("file://smallFile")
            .types(Long.class, Long.class, Long.class)
            .name("smallFile");

    IterativeDataSet<Tuple3<Long, Long, Long>> iteration = bigInput.iterate(10);

    Configuration joinStrategy = new Configuration();
    joinStrategy.setString(
        Optimizer.HINT_SHIP_STRATEGY, Optimizer.HINT_SHIP_STRATEGY_REPARTITION_HASH);

    if (!strategy.equals("")) {
      joinStrategy.setString(Optimizer.HINT_LOCAL_STRATEGY, strategy);
    }

    DataSet<Tuple3<Long, Long, Long>> inner =
        iteration
            .join(smallInput)
            .where(0)
            .equalTo(0)
            .with(new DummyJoiner())
            .name("DummyJoiner")
            .withParameters(joinStrategy);

    DataSet<Tuple3<Long, Long, Long>> output = iteration.closeWith(inner);

    output.output(new DiscardingOutputFormat<Tuple3<Long, Long, Long>>());

    return env.createProgramPlan();
  }