private Option<SakaiBulkDownloadFolder> findBulkDownloadFolder()
      throws NotValidDownloadFolderException {
    File[] foundFiles =
        folder.listFiles(
            new FileFilter() {

              public boolean accept(File path) {
                return path.isDirectory()
                    && path.getName().equals(MergingEnvironment.get().getAssignmentName());
              }
            });
    if (foundFiles != null && foundFiles.length > 0) {
      return Option.apply(new SakaiBulkDownloadFolder(foundFiles[0].getAbsolutePath()));
    }

    Option<File> zipFile =
        DirectoryUtils.find(
            folder,
            new FileFilter() {
              public boolean accept(File pathname) {
                return pathname.getName().endsWith(".zip");
              }
            });
    if (zipFile.isEmpty()) {
      return Option.empty();
    }

    // Extract the zip to look for the folder
    try {
      System.out.println("Extracting bulk downloads...");
      ZipFile zip = new ZipFile(zipFile.get());
      zip.extractAll(folder.getAbsolutePath());

      // Look for a folder matching the assignment name, taking the first one found.
      Option<File> resultsFolder =
          DirectoryUtils.find(
              folder,
              new FileFilter() {
                public boolean accept(File pathname) {
                  return pathname.isDirectory()
                      && pathname.getName().equals(MergingEnvironment.get().getAssignmentName());
                }
              });
      if (resultsFolder.isDefined()) {
        try {
          return Option.apply(new SakaiBulkDownloadFolder(resultsFolder.get().getAbsolutePath()));
        } catch (Exception e) {
          return Option.empty();
        }
      }
      System.out.println("done.");
      return Option.empty();
    } catch (ZipException e) {
      return Option.empty();
    }
  }
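
A minimal sketch of the scala.Option pattern the method above relies on from Java (Option.apply, Option.empty, isDefined, get); the helper and folder name below are hypothetical, not part of the original code:

// Sketch: wrapping a possibly-missing directory lookup in a scala.Option.
private static Option<File> findChildDirectory(File parent, String name) {
  File candidate = new File(parent, name); // "name" is an illustrative value
  return candidate.isDirectory() ? Option.apply(candidate) : Option.empty();
}

// Callers unwrap the result explicitly instead of checking for null:
// Option<File> result = findChildDirectory(folder, "assignment-1");
// if (result.isDefined()) { /* use result.get() */ } else { /* handle the missing folder */ }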
 public OperatorStatus onInputOrParameterChange(
     Map<String, TabularSchema> inputSchemas,
     OperatorParameters params,
     OperatorSchemaManager operatorSchemaManager) {
   this.updateOutputSchema(inputSchemas, params, operatorSchemaManager);
   scala.Option<String> msg = Option.empty();
   return new OperatorStatus(true, msg);
 }
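
The validation above always reports success with an empty message. For contrast, a failing validation would pair false with a populated Option; a small sketch reusing the same OperatorStatus constructor (the message text is made up):

 // Sketch: reporting a validation failure instead of success.
 private OperatorStatus invalidStatus(String reason) {
   scala.Option<String> msg = scala.Option.apply(reason);
   return new OperatorStatus(false, msg);
 }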
Example #3
 @Override
 public Option<OperatorCheckpointStats> getCheckpointStats() {
   CheckpointStatsTracker tracker = getGraph().getCheckpointStatsTracker();
   if (tracker == null) {
     return Option.empty();
   } else {
     return tracker.getOperatorStats(getJobVertexId());
   }
 }
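
Callers of getCheckpointStats() have to unwrap the returned Option explicitly; a minimal sketch of that consumption side (the helper name is ours, not Flink's):

 // Sketch: consuming the Option-typed result returned above.
 private static void printCheckpointStats(Option<OperatorCheckpointStats> stats) {
   if (stats.isDefined()) {
     System.out.println("Checkpoint stats: " + stats.get());
   } else {
     System.out.println("No checkpoint stats available yet.");
   }
 }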
Example #4
 // #http-context-creation
 public static HttpsContext create(
     SSLContext sslContext,
     Optional<Collection<String>> enabledCipherSuites,
     Optional<Collection<String>> enabledProtocols,
     Optional<ClientAuth> clientAuth,
     Optional<SSLParameters> sslParameters)
       // #http-context-creation
     {
   final scala.Option<scala.collection.immutable.Seq<String>> ecs;
   if (enabledCipherSuites.isPresent())
     ecs = scala.Option.apply(Util.immutableSeq(enabledCipherSuites.get()));
   else ecs = scala.Option.empty();
   final scala.Option<scala.collection.immutable.Seq<String>> ep;
   if (enabledProtocols.isPresent())
     ep = scala.Option.apply(Util.immutableSeq(enabledProtocols.get()));
   else ep = scala.Option.empty();
   return new akka.http.scaladsl.HttpsContext(
       sslContext,
       ecs,
       ep,
       OptionConverters.toScala(clientAuth),
       OptionConverters.toScala(sslParameters));
 }
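
The manual if/else conversions of enabledCipherSuites and enabledProtocols can also be written with Optional.map plus the same helpers the method already uses (Util.immutableSeq and OptionConverters.toScala); a sketch of that shorter form, not the original implementation:

    // Sketch: one-line equivalents of the ecs/ep conversions above.
    final scala.Option<scala.collection.immutable.Seq<String>> ecs2 =
        OptionConverters.toScala(enabledCipherSuites.map(cs -> Util.immutableSeq(cs)));
    final scala.Option<scala.collection.immutable.Seq<String>> ep2 =
        OptionConverters.toScala(enabledProtocols.map(ps -> Util.immutableSeq(ps)));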
/**
 * The tests in this class verify the behavior of the TaskManager when connecting to the JobManager,
 * and when the JobManager is unreachable.
 */
public class TaskManagerRegistrationTest extends TestLogger {

  private static final Option<String> NONE_STRING = Option.empty();

  // use one actor system throughout all tests
  private static ActorSystem actorSystem;

  private static Configuration config;

  private static FiniteDuration timeout = new FiniteDuration(20, TimeUnit.SECONDS);

  @BeforeClass
  public static void startActorSystem() {
    config = new Configuration();
    config.setString(ConfigConstants.AKKA_ASK_TIMEOUT, "5 s");
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_INTERVAL, "200 ms");
    config.setString(ConfigConstants.AKKA_WATCH_HEARTBEAT_PAUSE, "2 s");
    config.setDouble(ConfigConstants.AKKA_WATCH_THRESHOLD, 2.0);

    actorSystem = AkkaUtils.createLocalActorSystem(config);
  }

  @AfterClass
  public static void shutdownActorSystem() {
    if (actorSystem != null) {
      actorSystem.shutdown();
    }
  }

  /** A test that verifies that two TaskManagers correctly register at the JobManager. */
  @Test
  public void testSimpleRegistration() {
    new JavaTestKit(actorSystem) {
      {
        ActorGateway jobManager = null;
        ActorGateway taskManager1 = null;
        ActorGateway taskManager2 = null;

        try {
          // a simple JobManager
          jobManager = createJobManager(actorSystem, config);
          startResourceManager(config, jobManager.actor());

          // start two TaskManagers. they will automatically try to register
          taskManager1 = createTaskManager(actorSystem, jobManager, config, true, false);

          taskManager2 = createTaskManager(actorSystem, jobManager, config, true, false);

          // check that the TaskManagers are registered
          Future<Object> responseFuture1 =
              taskManager1.ask(
                  TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), timeout);

          Future<Object> responseFuture2 =
              taskManager2.ask(
                  TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), timeout);

          Object response1 = Await.result(responseFuture1, timeout);
          Object response2 = Await.result(responseFuture2, timeout);

          // this is a hack to work around the way Java can interact with scala case objects
          Class<?> confirmClass = TaskManagerMessages.getRegisteredAtJobManagerMessage().getClass();
          assertTrue(response1 != null && confirmClass.isAssignableFrom(response1.getClass()));
          assertTrue(response2 != null && confirmClass.isAssignableFrom(response2.getClass()));

          // check that the JobManager has 2 TaskManagers registered
          Future<Object> numTaskManagersFuture =
              jobManager.ask(JobManagerMessages.getRequestNumberRegisteredTaskManager(), timeout);

          Integer count = (Integer) Await.result(numTaskManagersFuture, timeout);
          assertEquals(2, count.intValue());
        } catch (Exception e) {
          e.printStackTrace();
          fail(e.getMessage());
        } finally {
          stopActor(taskManager1);
          stopActor(taskManager2);
          stopActor(jobManager);
        }
      }
    };
  }

  /**
   * A test that verifies that a TaskManager eventually registers at the JobManager, even when the
   * JobManager becomes available only after the TaskManager has started.
   */
  @Test
  public void testDelayedRegistration() {
    new JavaTestKit(actorSystem) {
      {
        ActorGateway jobManager = null;
        ActorGateway taskManager = null;

        FiniteDuration delayedTimeout = timeout.$times(3);

        try {
          // start a TaskManager that tries to register at the JobManager before the JobManager is
          // available. we give it the regular JobManager akka URL
          taskManager =
              createTaskManager(
                  actorSystem,
                  JobManager.getLocalJobManagerAkkaURL(Option.<String>empty()),
                  new Configuration(),
                  true,
                  false);

          // let it try for a bit
          Thread.sleep(6000);

          // now start the JobManager, with the regular akka URL
          jobManager = createJobManager(actorSystem, new Configuration());

          startResourceManager(config, jobManager.actor());

          // check that the TaskManagers are registered
          Future<Object> responseFuture =
              taskManager.ask(
                  TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), delayedTimeout);

          Object response = Await.result(responseFuture, delayedTimeout);

          // this is a hack to work around the way Java can interact with scala case objects
          Class<?> confirmClass = TaskManagerMessages.getRegisteredAtJobManagerMessage().getClass();
          assertTrue(response != null && confirmClass.isAssignableFrom(response.getClass()));

        } catch (Exception e) {
          e.printStackTrace();
          fail(e.getMessage());
        } finally {
          stopActor(taskManager);
          stopActor(jobManager);
        }
      }
    };
  }

  /**
   * Tests that the TaskManager shuts down when it cannot register at the JobManager within the
   * given maximum duration.
   *
   * <p>Unfortunately, this test does not give good error messages. (I have not figured out how to
   * get any better message out of the Akka TestKit than "ask timeout exception".)
   *
   * <p>Anyway: An "ask timeout exception" here means that the TaskManager did not shut down after
   * its registration timeout expired.
   */
  @Test
  public void testShutdownAfterRegistrationDurationExpired() {
    new JavaTestKit(actorSystem) {
      {
        ActorGateway taskManager = null;

        try {
          // registration timeout of 500 milliseconds
          Configuration tmConfig = new Configuration();
          tmConfig.setString(ConfigConstants.TASK_MANAGER_MAX_REGISTRATION_DURATION, "500 ms");

          // start the taskManager actor
          taskManager =
              createTaskManager(
                  actorSystem,
                  JobManager.getLocalJobManagerAkkaURL(Option.<String>empty()),
                  tmConfig,
                  true,
                  false);

          // make sure it terminates in time, since it cannot register at a JobManager
          watch(taskManager.actor());

          final ActorGateway tm = taskManager;

          new Within(timeout) {

            @Override
            protected void run() {
              expectTerminated(tm.actor());
            }
          };
        } catch (Throwable e) {
          e.printStackTrace();
          fail(e.getMessage());
        } finally {
          stopActor(taskManager);
        }
      }
    };
  }

  /**
   * Make sure that the TaskManager keeps trying to register, even after registration attempts have
   * been refused.
   */
  @Test
  public void testTaskManagerResumesConnectAfterRefusedRegistration() {
    new JavaTestKit(actorSystem) {
      {
        ActorGateway jm = null;
        ActorGateway taskManager = null;
        try {
          jm =
              TestingUtils.createForwardingActor(
                  actorSystem, getTestActor(), Option.<String>empty());
          final ActorGateway jmGateway = jm;

          // we make the test actor (the test kit) the JobManager to intercept
          // the messages
          taskManager = createTaskManager(actorSystem, jmGateway, config, true, false);

          final ActorGateway taskManagerGateway = taskManager;

          // check and decline initial registration
          new Within(timeout) {

            @Override
            protected void run() {
              // the TaskManager should try to register
              expectMsgClass(RegisterTaskManager.class);

              // we decline the registration
              taskManagerGateway.tell(
                  new RefuseRegistration(new Exception("test reason")), jmGateway);
            }
          };

          // the TaskManager should wait a bit and retry...
          FiniteDuration maxDelay =
              (FiniteDuration) TaskManager.DELAY_AFTER_REFUSED_REGISTRATION().$times(2.0);
          new Within(maxDelay) {

            @Override
            protected void run() {
              expectMsgClass(RegisterTaskManager.class);
            }
          };
        } catch (Throwable e) {
          e.printStackTrace();
          fail(e.getMessage());
        } finally {
          stopActor(taskManager);
          stopActor(jm);
        }
      }
    };
  }

  /**
   * Validate that the TaskManager attempts to re-connect after it lost the connection to the
   * JobManager.
   */
  @Test
  public void testTaskManagerResumesConnectAfterJobManagerFailure() {
    new JavaTestKit(actorSystem) {
      {
        ActorGateway fakeJobManager1Gateway = null;
        ActorGateway fakeJobManager2Gateway = null;
        ActorGateway taskManagerGateway = null;

        final String JOB_MANAGER_NAME = "ForwardingJobManager";

        try {
          fakeJobManager1Gateway =
              TestingUtils.createForwardingActor(
                  actorSystem, getTestActor(), Option.apply(JOB_MANAGER_NAME));
          final ActorGateway fakeJM1Gateway = fakeJobManager1Gateway;

          // we make the test actor (the test kit) the JobManager to intercept
          // the messages
          taskManagerGateway =
              createTaskManager(actorSystem, fakeJobManager1Gateway, config, true, false);

          final ActorGateway tm = taskManagerGateway;

          // validate initial registration
          new Within(timeout) {

            @Override
            protected void run() {
              // the TaskManager should try to register
              expectMsgClass(RegisterTaskManager.class);

              // we accept the registration
              tm.tell(new AcknowledgeRegistration(new InstanceID(), 45234), fakeJM1Gateway);
            }
          };

          // kill the first forwarding JobManager
          watch(fakeJobManager1Gateway.actor());
          stopActor(fakeJobManager1Gateway.actor());

          final ActorGateway gateway = fakeJobManager1Gateway;

          new Within(timeout) {

            @Override
            protected void run() {
              Object message = null;

              // we might also receive RegisterTaskManager and Heartbeat messages which
              // are queued up in the testing actor's mailbox
              while (message == null || !(message instanceof Terminated)) {
                message = receiveOne(timeout);
              }

              Terminated terminatedMessage = (Terminated) message;
              assertEquals(gateway.actor(), terminatedMessage.actor());
            }
          };

          fakeJobManager1Gateway = null;

          // now start the second fake JobManager and expect that
          // the TaskManager registers again
          // the second fake JM needs to have the same actor URL

          // since we cannot reliably wait until the actor is unregistered (name is
          // available again) we loop with multiple tries for 20 seconds
          long deadline = 20000000000L + System.nanoTime();
          do {
            try {
              fakeJobManager2Gateway =
                  TestingUtils.createForwardingActor(
                      actorSystem, getTestActor(), Option.apply(JOB_MANAGER_NAME));
            } catch (InvalidActorNameException e) {
              // wait and retry
              Thread.sleep(100);
            }
          } while (fakeJobManager2Gateway == null && System.nanoTime() < deadline);

          final ActorGateway fakeJM2GatewayClosure = fakeJobManager2Gateway;

          // expect the next registration
          new Within(timeout) {

            @Override
            protected void run() {
              expectMsgClass(RegisterTaskManager.class);

              // we accept the registration
              tm.tell(new AcknowledgeRegistration(new InstanceID(), 45234), fakeJM2GatewayClosure);
            }
          };
        } catch (Throwable e) {
          e.printStackTrace();
          fail(e.getMessage());
        } finally {
          stopActor(taskManagerGateway);
          stopActor(fakeJobManager1Gateway);
          stopActor(fakeJobManager2Gateway);
        }
      }
    };
  }

  @Test
  public void testStartupWhenNetworkStackFailsToInitialize() {

    ServerSocket blocker = null;

    try {
      blocker = new ServerSocket(0, 50, InetAddress.getByName("localhost"));

      final Configuration cfg = new Configuration();
      cfg.setString(ConfigConstants.TASK_MANAGER_HOSTNAME_KEY, "localhost");
      cfg.setInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, blocker.getLocalPort());
      cfg.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 1);

      new JavaTestKit(actorSystem) {
        {
          ActorRef taskManager = null;
          ActorRef jobManager = null;
          ActorRef resourceManager = null;

          try {
            // a simple JobManager
            jobManager = startJobManager(config);

            resourceManager = startResourceManager(config, jobManager);

            // start a task manager with a configuration that provides a blocked port
            taskManager =
                TaskManager.startTaskManagerComponentsAndActor(
                    cfg,
                    ResourceID.generate(),
                    actorSystem,
                    "localhost",
                    NONE_STRING, // no actor name -> random
                    new Some<LeaderRetrievalService>(
                        new StandaloneLeaderRetrievalService(jobManager.path().toString())),
                    false, // init network stack !!!
                    TaskManager.class);

            watch(taskManager);

            expectTerminated(timeout, taskManager);
          } catch (Exception e) {
            e.printStackTrace();
            fail(e.getMessage());
          } finally {
            stopActor(taskManager);
            stopActor(jobManager);
          }
        }
      };
    } catch (Exception e) {
      // does not work, skip test
      e.printStackTrace();
      fail(e.getMessage());
    } finally {
      if (blocker != null) {
        try {
          blocker.close();
        } catch (IOException e) {
          // ignore, best effort
        }
      }
    }
  }

  @Test
  public void testCheckForValidRegistrationSessionIDs() {
    new JavaTestKit(actorSystem) {
      {
        ActorGateway taskManagerGateway = null;

        try {
          // we make the test actor (the test kit) the JobManager to intercept
          // the messages
          taskManagerGateway = createTaskManager(actorSystem, getTestActor(), config, true, false);

          final ActorRef taskManager = taskManagerGateway.actor();

          final UUID falseLeaderSessionID = UUID.randomUUID();
          final UUID trueLeaderSessionID = null;

          new Within(timeout) {

            @Override
            protected void run() {
              taskManager.tell(
                  TaskManagerMessages.getNotifyWhenRegisteredAtJobManagerMessage(), getTestActor());

              // the TaskManager should try to register

              LeaderSessionMessage lsm = expectMsgClass(LeaderSessionMessage.class);

              assertTrue(lsm.leaderSessionID() == trueLeaderSessionID);
              assertTrue(lsm.message() instanceof RegisterTaskManager);

              final ActorRef tm = getLastSender();

              // This AcknowledgeRegistration message should be discarded because the
              // registration session ID is wrong
              tm.tell(
                  new LeaderSessionMessage(
                      falseLeaderSessionID, new AcknowledgeRegistration(new InstanceID(), 1)),
                  getTestActor());

              // Valid AcknowledgeRegistration message
              tm.tell(
                  new LeaderSessionMessage(
                      trueLeaderSessionID, new AcknowledgeRegistration(new InstanceID(), 1)),
                  getTestActor());

              Object message = null;
              Object confirmMessageClass =
                  TaskManagerMessages.getRegisteredAtJobManagerMessage().getClass();

              while (message == null || !(message.getClass().equals(confirmMessageClass))) {
                message = receiveOne(TestingUtils.TESTING_DURATION());
              }

              tm.tell(JobManagerMessages.getRequestLeaderSessionID(), getTestActor());

              expectMsgEquals(new JobManagerMessages.ResponseLeaderSessionID(trueLeaderSessionID));
            }
          };
        } catch (Throwable e) {
          e.printStackTrace();
          fail(e.getMessage());
        } finally {
          stopActor(taskManagerGateway);
        }
      }
    };
  }

  // --------------------------------------------------------------------------------------------
  //  Utility Functions
  // --------------------------------------------------------------------------------------------

  private static ActorRef startJobManager(Configuration configuration) throws Exception {
    // start the actors. don't give names, so they get generated names and we
    // avoid conflicts with the actor names
    return JobManager.startJobManagerActors(
            configuration,
            actorSystem,
            NONE_STRING,
            NONE_STRING,
            JobManager.class,
            MemoryArchivist.class)
        ._1();
  }

  private static ActorRef startResourceManager(Configuration config, ActorRef jobManager) {
    return FlinkResourceManager.startResourceManagerActors(
        config,
        actorSystem,
        new StandaloneLeaderRetrievalService(jobManager.path().toString()),
        StandaloneResourceManager.class);
  }
}
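
The stopActor, createJobManager, and createTaskManager helpers referenced throughout these tests are not part of this excerpt. As an illustration only, a null-tolerant stopActor could look roughly like the sketch below (an assumption, not the actual Flink test utility):

  // Sketch: null-safe shutdown helpers like the ones the finally blocks call.
  private static void stopActor(ActorGateway gateway) {
    if (gateway != null) {
      gateway.actor().tell(akka.actor.PoisonPill.getInstance(), ActorRef.noSender());
    }
  }

  private static void stopActor(ActorRef actor) {
    if (actor != null) {
      actor.tell(akka.actor.PoisonPill.getInstance(), ActorRef.noSender());
    }
  }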
Example #6
 /**
  * Retrieves a configuration value as a <code>String</code>.
  *
  * @param key configuration key (relative to configuration root key)
  * @return a configuration value or <code>null</code>
  */
 @SuppressWarnings("unused")
 private static String getString(String key) {
   return Scala.orNull(
       conf.getString(
           key, scala.Option.<scala.collection.immutable.Set<java.lang.String>>empty()));
 }
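
Scala.orNull above turns an empty Option into null for Java callers. If that helper is not available, an equivalent stand-in is easy to write (a sketch, not the library implementation):

 // Sketch: a stand-in for the orNull helper used above.
 private static <T> T orNull(scala.Option<T> option) {
   return option.isDefined() ? option.get() : null;
 }

 // Usage with the method above (the key is hypothetical):
 // String name = getString("app.name"); // returns the configured value or null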
  @Test
  public void testJobManagerProcessFailure() throws Exception {
    // Config
    final int numberOfJobManagers = 2;
    final int numberOfTaskManagers = 2;
    final int numberOfSlotsPerTaskManager = 2;

    assertEquals(PARALLELISM, numberOfTaskManagers * numberOfSlotsPerTaskManager);

    // Setup
    // Test actor system
    ActorSystem testActorSystem;

    // Job managers
    final JobManagerProcess[] jmProcess = new JobManagerProcess[numberOfJobManagers];

    // Task managers
    final ActorSystem[] tmActorSystem = new ActorSystem[numberOfTaskManagers];

    // Leader election service
    LeaderRetrievalService leaderRetrievalService = null;

    // Coordination between the processes goes through a directory
    File coordinateTempDir = null;

    try {
      final Deadline deadline = TestTimeOut.fromNow();

      // Coordination directory
      coordinateTempDir = createTempDirectory();

      // Job Managers
      Configuration config =
          ZooKeeperTestUtils.createZooKeeperRecoveryModeConfig(
              ZooKeeper.getConnectString(), FileStateBackendBasePath.getPath());

      // Start first process
      jmProcess[0] = new JobManagerProcess(0, config);
      jmProcess[0].createAndStart();

      // Task manager configuration
      config.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, 4);
      config.setInteger(ConfigConstants.TASK_MANAGER_NETWORK_NUM_BUFFERS_KEY, 100);
      config.setInteger(ConfigConstants.TASK_MANAGER_NUM_TASK_SLOTS, 2);

      // Start the task managers (in local actor systems, not separate processes)
      for (int i = 0; i < numberOfTaskManagers; i++) {
        tmActorSystem[i] = AkkaUtils.createActorSystem(AkkaUtils.getDefaultAkkaConfig());
        TaskManager.startTaskManagerComponentsAndActor(
            config,
            tmActorSystem[i],
            "localhost",
            Option.<String>empty(),
            Option.<LeaderRetrievalService>empty(),
            false,
            TaskManager.class);
      }

      // Test actor system
      testActorSystem = AkkaUtils.createActorSystem(AkkaUtils.getDefaultAkkaConfig());

      jmProcess[0].getActorRef(testActorSystem, deadline.timeLeft());

      // Leader listener
      TestingListener leaderListener = new TestingListener();
      leaderRetrievalService = ZooKeeperUtils.createLeaderRetrievalService(config);
      leaderRetrievalService.start(leaderListener);

      // Initial submission
      leaderListener.waitForNewLeader(deadline.timeLeft().toMillis());

      String leaderAddress = leaderListener.getAddress();
      UUID leaderId = leaderListener.getLeaderSessionID();

      // Get the leader ref
      ActorRef leaderRef =
          AkkaUtils.getActorRef(leaderAddress, testActorSystem, deadline.timeLeft());
      ActorGateway leaderGateway = new AkkaActorGateway(leaderRef, leaderId);

      // Wait for all task managers to connect to the leading job manager
      JobManagerActorTestUtils.waitForTaskManagers(
          numberOfTaskManagers, leaderGateway, deadline.timeLeft());

      final File coordinateDirClosure = coordinateTempDir;
      final Throwable[] errorRef = new Throwable[1];

      // we trigger program execution in a separate thread
      Thread programTrigger =
          new Thread("Program Trigger") {
            @Override
            public void run() {
              try {
                testJobManagerFailure(ZooKeeper.getConnectString(), coordinateDirClosure);
              } catch (Throwable t) {
                t.printStackTrace();
                errorRef[0] = t;
              }
            }
          };

      // start the test program
      programTrigger.start();

      // wait until all marker files are in place, indicating that all tasks have started
      AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(
          coordinateTempDir, READY_MARKER_FILE_PREFIX, PARALLELISM, deadline.timeLeft().toMillis());

      // Kill one of the job managers and trigger recovery
      jmProcess[0].destroy();

      jmProcess[1] = new JobManagerProcess(1, config);
      jmProcess[1].createAndStart();

      jmProcess[1].getActorRef(testActorSystem, deadline.timeLeft());

      // we create the marker file which signals the program's tasks that they can complete
      AbstractTaskManagerProcessFailureRecoveryTest.touchFile(
          new File(coordinateTempDir, PROCEED_MARKER_FILE));

      programTrigger.join(deadline.timeLeft().toMillis());

      // We wait for the finish marker file. We don't wait for the program trigger, because
      // we submit in detached mode.
      AbstractTaskManagerProcessFailureRecoveryTest.waitForMarkerFiles(
          coordinateTempDir, FINISH_MARKER_FILE_PREFIX, 1, deadline.timeLeft().toMillis());

      // check that the program really finished
      assertFalse("The program did not finish in time", programTrigger.isAlive());

      // check whether the program encountered an error
      if (errorRef[0] != null) {
        Throwable error = errorRef[0];
        error.printStackTrace();
        fail(
            "The program encountered a "
                + error.getClass().getSimpleName()
                + " : "
                + error.getMessage());
      }
    } catch (Throwable t) {
      // Print early (in some situations the process logs get too big
      // for Travis and the root problem is not shown)
      t.printStackTrace();

      for (JobManagerProcess p : jmProcess) {
        if (p != null) {
          p.printProcessLog();
        }
      }

      throw t;
    } finally {
      for (int i = 0; i < numberOfTaskManagers; i++) {
        if (tmActorSystem[i] != null) {
          tmActorSystem[i].shutdown();
        }
      }

      if (leaderRetrievalService != null) {
        leaderRetrievalService.stop();
      }

      for (JobManagerProcess process : jmProcess) {
        if (process != null) {
          process.destroy();
        }
      }

      // Delete coordination directory
      if (coordinateTempDir != null) {
        try {
          FileUtils.deleteDirectory(coordinateTempDir);
        } catch (Throwable ignored) {
        }
      }
    }
  }
Example #8
  public static void main(String[] args) {

    // actor system for the manualProbe calls below (assumed; not shown in the original excerpt)
    final ActorSystem sys = ActorSystem.create("stream-examples");

    Outlet<Integer> outlet = null;

    Outlet<Integer> outlet1 = null;
    Outlet<Integer> outlet2 = null;

    Inlet<Integer> inlet = null;

    Inlet<Integer> inlet1 = null;
    Inlet<Integer> inlet2 = null;

    Flow<Integer, Integer, BoxedUnit> flow = Flow.of(Integer.class);
    Flow<Integer, Integer, BoxedUnit> flow1 = Flow.of(Integer.class);
    Flow<Integer, Integer, BoxedUnit> flow2 = Flow.of(Integer.class);

    Promise<Option<Integer>> promise = null;

    {
      Graph<SourceShape<Integer>, BoxedUnit> graphSource = null;
      Graph<SinkShape<Integer>, BoxedUnit> graphSink = null;
      Graph<FlowShape<Integer, Integer>, BoxedUnit> graphFlow = null;

      // #flow-wrap
      Source<Integer, BoxedUnit> source = Source.fromGraph(graphSource);
      Sink<Integer, BoxedUnit> sink = Sink.fromGraph(graphSink);
      Flow<Integer, Integer, BoxedUnit> aflow = Flow.fromGraph(graphFlow);
      Flow.fromSinkAndSource(Sink.<Integer>head(), Source.single(0));
      Flow.fromSinkAndSourceMat(Sink.<Integer>head(), Source.single(0), Keep.left());
      // #flow-wrap

      Graph<BidiShape<Integer, Integer, Integer, Integer>, BoxedUnit> bidiGraph = null;

      // #bidi-wrap
      BidiFlow<Integer, Integer, Integer, Integer, BoxedUnit> bidiFlow =
          BidiFlow.fromGraph(bidiGraph);
      BidiFlow.fromFlows(flow1, flow2);
      BidiFlow.fromFlowsMat(flow1, flow2, Keep.both());
      // #bidi-wrap

    }

    {
      // #graph-create
      GraphDSL.create(
          builder -> {
            // ...
            return ClosedShape.getInstance();
          });

      GraphDSL.create(
          builder -> {
            // ...
            return new FlowShape<>(inlet, outlet);
          });
      // #graph-create
    }

    {
      // #graph-create-2
      GraphDSL.create(
          builder -> {
            // ...
            return SourceShape.of(outlet);
          });

      GraphDSL.create(
          builder -> {
            // ...
            return SinkShape.of(inlet);
          });

      GraphDSL.create(
          builder -> {
            // ...
            return FlowShape.of(inlet, outlet);
          });

      GraphDSL.create(
          builder -> {
            // ...
            return BidiShape.of(inlet1, outlet1, inlet2, outlet2);
          });
      // #graph-create-2
    }

    {
      // #graph-builder
      GraphDSL.create(
          builder -> {
            builder.from(outlet).toInlet(inlet);
            builder.from(outlet).via(builder.add(flow)).toInlet(inlet);
            builder.from(builder.add(Source.single(0))).to(builder.add(Sink.head()));
            // ...
            return ClosedShape.getInstance();
          });
      // #graph-builder
    }

    // #source-creators
    Source<Integer, Promise<Option<Integer>>> src = Source.<Integer>maybe();
    // Complete the promise with an empty option to emulate the old lazyEmpty
    promise.trySuccess(scala.Option.empty());

    final Source<String, Cancellable> ticks =
        Source.tick(
            FiniteDuration.create(0, TimeUnit.MILLISECONDS),
            FiniteDuration.create(200, TimeUnit.MILLISECONDS),
            "tick");

    final Source<Integer, BoxedUnit> pubSource =
        Source.fromPublisher(TestPublisher.<Integer>manualProbe(true, sys));

    final Source<Integer, BoxedUnit> futSource = Source.fromFuture(Futures.successful(42));

    final Source<Integer, Subscriber<Integer>> subSource = Source.<Integer>asSubscriber();
    // #source-creators

    // #sink-creators
    final Sink<Integer, BoxedUnit> subSink =
        Sink.fromSubscriber(TestSubscriber.<Integer>manualProbe(sys));
    // #sink-creators

    // #sink-as-publisher
    final Sink<Integer, Publisher<Integer>> pubSink = Sink.<Integer>asPublisher(false);

    final Sink<Integer, Publisher<Integer>> pubSinkFanout = Sink.<Integer>asPublisher(true);
    // #sink-as-publisher

    // #empty-flow
    Flow<Integer, Integer, BoxedUnit> emptyFlow = Flow.<Integer>create();
    // or
    Flow<Integer, Integer, BoxedUnit> emptyFlow2 = Flow.of(Integer.class);
    // #empty-flow

    // #flatMapConcat
    Flow.<Source<Integer, BoxedUnit>>create()
        .<Integer, BoxedUnit>flatMapConcat(
            new Function<Source<Integer, BoxedUnit>, Source<Integer, BoxedUnit>>() {
              @Override
              public Source<Integer, BoxedUnit> apply(Source<Integer, BoxedUnit> param)
                  throws Exception {
                return param;
              }
            });
    // #flatMapConcat

    Uri uri = null;
    // #raw-query
    final akka.japi.Option<String> theRawQueryString = uri.rawQueryString();
    // #raw-query

    // #query-param
    final akka.japi.Option<String> aQueryParam = uri.query().get("a");
    // #query-param

    // #file-source-sink
    final Source<ByteString, Future<Long>> fileSrc = FileIO.fromFile(new File("."));

    final Source<ByteString, Future<Long>> otherFileSrc = FileIO.fromFile(new File("."), 1024);

    final Sink<ByteString, Future<Long>> fileSink = FileIO.toFile(new File("."));
    // #file-source-sink

    // #input-output-stream-source-sink
    final Source<ByteString, Future<java.lang.Long>> inputStreamSrc =
        StreamConverters.fromInputStream(
            new Creator<InputStream>() {
              public InputStream create() {
                return new SomeInputStream();
              }
            });

    final Source<ByteString, Future<java.lang.Long>> otherInputStreamSrc =
        StreamConverters.fromInputStream(
            new Creator<InputStream>() {
              public InputStream create() {
                return new SomeInputStream();
              }
            },
            1024);

    final Sink<ByteString, Future<java.lang.Long>> outputStreamSink =
        StreamConverters.fromOutputStream(
            new Creator<OutputStream>() {
              public OutputStream create() {
                return new SomeOutputStream();
              }
            });
    // #input-output-stream-source-sink

    // #output-input-stream-source-sink
    final FiniteDuration timeout = FiniteDuration.Zero();

    final Source<ByteString, OutputStream> outputStreamSrc = StreamConverters.asOutputStream();

    final Source<ByteString, OutputStream> otherOutputStreamSrc =
        StreamConverters.asOutputStream(timeout);

    final Sink<ByteString, InputStream> someInputStreamSink = StreamConverters.asInputStream();

    final Sink<ByteString, InputStream> someOtherInputStreamSink =
        StreamConverters.asInputStream(timeout);
    // #output-input-stream-source-sink

  }
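
The Source.maybe snippet above completes its materialized Promise with an empty Option; completing it with a defined Option emits a single element instead. A sketch of that round trip, assuming a materializer is available (the helper name and materializer creation are ours, and the exact API depends on the Akka Streams version):

  // Sketch: obtaining and completing the Promise materialized by Source.maybe.
  private static void completeMaybeSource(ActorMaterializer mat) {
    final Promise<Option<Integer>> completion =
        Source.<Integer>maybe().to(Sink.<Integer>head()).run(mat);

    // Emit exactly one element ...
    completion.trySuccess(scala.Option.apply(42));
    // ... or complete without emitting anything:
    // completion.trySuccess(scala.Option.empty());
  }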
  private IncOptions getIncOptions() {
    // comment from SBT (sbt.inc.IncOptions.scala):
    // After which step include whole transitive closure of invalidated source files.
    //
    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Steps before transitive closure
    int transitiveStep = 3;

    // comment from SBT (sbt.inc.IncOptions.scala):
    // What's the fraction of invalidated source files when we switch to recompiling
    // all files and giving up incremental compilation altogether. That's useful in
    // cases when probability that we end up recompiling most of source files but
    // in multiple steps is high. Multi-step incremental recompilation is slower
    // than recompiling everything in one step.
    //
    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Limit before recompiling all sources
    double recompileAllFraction = 0.5d;

    // comment from SBT (sbt.inc.IncOptions.scala):
    // Print very detailed information about relations, such as dependencies between source files.
    //
    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Enable debug logging of analysis relations
    boolean relationsDebug = false;

    // comment from SBT (sbt.inc.IncOptions.scala):
    // Enable tools for debugging API changes. At the moment this option is unused but in the
    // future it will enable for example:
    //   - disabling API hashing and API minimization (potentially very memory consuming)
    //   - diffing textual API representation which helps understanding what kind of changes
    //     to APIs are visible to the incremental compiler
    //
    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Enable analysis API debugging
    boolean apiDebug = false;

    // comment from SBT (sbt.inc.IncOptions.scala):
    // Controls context size (in lines) displayed when diffs are produced for textual API
    // representation.
    //
    // This option is used only when `apiDebug == true`.
    //
    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Diff context size (in lines) for API debug
    int apiDiffContextSize = 5;

    // comment from SBT (sbt.inc.IncOptions.scala):
    // The directory where we dump textual representation of APIs. This method might be called
    // only if apiDebug returns true. This option is unused at the moment because the needed
    // functionality is not implemented yet.
    //
    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Destination for analysis API dump
    Option<File> apiDumpDirectory = Option.empty();

    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Restore previous class files on failure
    boolean transactional = false;

    // comment from Zinc (com.typesafe.zinc.Settings.scala):
    // Backup location (if transactional)
    Option<File> backup = Option.empty();

    return new IncOptions(
        transitiveStep,
        recompileAllFraction,
        relationsDebug,
        apiDebug,
        apiDiffContextSize,
        apiDumpDirectory,
        transactional,
        backup);
  }
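
As a usage note, the same eight-argument constructor accepts different values when API debugging is wanted; a sketch reusing only the parameters shown above (both paths are illustrative, not defaults from SBT or Zinc):

  // Sketch: an IncOptions variant with API debugging and transactional compilation enabled.
  private IncOptions getDebugIncOptions() {
    return new IncOptions(
        3,                                          // transitiveStep
        0.5d,                                       // recompileAllFraction
        true,                                       // relationsDebug
        true,                                       // apiDebug
        10,                                         // apiDiffContextSize
        Option.apply(new File("target/api-dump")),  // apiDumpDirectory (hypothetical path)
        true,                                       // transactional
        Option.apply(new File("target/backup")));   // backup (hypothetical path)
  }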