@Override
  public void postStop() throws Exception {
    super.postStop();

    log.info("Post stop on model saver");
    cluster.unsubscribe(getSelf());
  }
  public static void startFrontend(Address joinAddress) {
    ActorSystem system = ActorSystem.create(systemName);
    Cluster.get(system).join(joinAddress);
    ActorRef frontend = system.actorOf(Props.create(Frontend.class), "frontend");
    system.actorOf(Props.create(WorkProducer.class, frontend), "producer");
    system.actorOf(Props.create(WorkResultConsumer.class), "consumer");
  }
  public static Address startBackend(Address joinAddress, String role) {
    Config conf =
        ConfigFactory.parseString("akka.cluster.roles=[" + role + "]")
            .withFallback(ConfigFactory.load());
    ActorSystem system = ActorSystem.create(systemName, conf);
    Address realJoinAddress =
        (joinAddress == null) ? Cluster.get(system).selfAddress() : joinAddress;
    Cluster.get(system).join(realJoinAddress);

    system.actorOf(
        ClusterSingletonManager.defaultProps(
            Master.props(workTimeout), "active", PoisonPill.getInstance(), role),
        "master");

    return realJoinAddress;
  }
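A wiring sketch added for illustration (not part of the original snippet): the usual way these helpers are combined in a main method, with the first backend seeding the cluster. The role name "backend" and the sleep durations are assumptions.

  public static void main(String[] args) throws InterruptedException {
    // the first backend has no address to join, so it seeds the cluster itself
    Address joinAddress = startBackend(null, "backend");
    Thread.sleep(5000);
    // further backends and the frontend join via the seed address
    startBackend(joinAddress, "backend");
    Thread.sleep(5000);
    startFrontend(joinAddress);
  }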
Example #4
 @Test
 public void demonstrateLWWRegister() {
   // #lwwregister
   final Cluster node = Cluster.get(system);
   final LWWRegister<String> r1 = LWWRegister.create(node, "Hello");
   final LWWRegister<String> r2 = r1.withValue(node, "Hi");
   System.out.println(r1.value() + " by " + r1.updatedBy() + " at " + r1.timestamp());
   // #lwwregister
   assertEquals("Hi", r2.value());
 }
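A follow-on sketch, not in the original test: merging the two register versions converges on the newer write, because the default clock always produces a strictly increasing timestamp. It reuses the same system fixture as the test above.

  public void demonstrateLWWRegisterMerge() {
    final Cluster node = Cluster.get(system);
    final LWWRegister<String> r1 = LWWRegister.create(node, "Hello");
    final LWWRegister<String> r2 = r1.withValue(node, "Hi");
    // merge is commutative; either direction converges on the later value
    System.out.println(r1.merge(r2).value()); // Hi
  }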
Example #5
 public void demonstrateORSet() {
   // #orset
   final Cluster node = Cluster.get(system);
   final ORSet<String> s0 = ORSet.create();
   final ORSet<String> s1 = s0.add(node, "a");
   final ORSet<String> s2 = s1.add(node, "b");
   final ORSet<String> s3 = s2.remove(node, "a");
   System.out.println(s3.getElements()); // b
   // #orset
 }
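Also added for illustration (an assumption, not from the source): merging an earlier state with a later one that removed "a" still converges on {b}, because the removal observed the earlier add.

  public void demonstrateORSetMerge() {
    final Cluster node = Cluster.get(system);
    final ORSet<String> s1 = ORSet.<String>create().add(node, "a").add(node, "b");
    final ORSet<String> s2 = s1.remove(node, "a");
    // the remove saw the add of "a", so it wins in the merge
    System.out.println(s1.merge(s2).getElements()); // b
  }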
Example #6
 public void demonstratePNCounter() {
   // #pncounter
   final Cluster node = Cluster.get(system);
   final PNCounter c0 = PNCounter.create();
   final PNCounter c1 = c0.increment(node, 1);
   final PNCounter c2 = c1.increment(node, 7);
   final PNCounter c3 = c2.decrement(node, 2);
   System.out.println(c3.value()); // 6
   // #pncounter
 }
Example #7
 public void demonstrateORMultiMap() {
   // #ormultimap
   final Cluster node = Cluster.get(system);
   final ORMultiMap<Integer> m0 = ORMultiMap.create();
   final ORMultiMap<Integer> m1 = m0.put(node, "a", new HashSet<Integer>(Arrays.asList(1, 2, 3)));
   final ORMultiMap<Integer> m2 = m1.addBinding(node, "a", 4);
   final ORMultiMap<Integer> m3 = m2.removeBinding(node, "a", 2);
   final ORMultiMap<Integer> m4 = m3.addBinding(node, "b", 1);
   System.out.println(m4.getEntries());
   // #ormultimap
 }
Example #8
 public void demonstratePNCounterMap() {
   // #pncountermap
   final Cluster node = Cluster.get(system);
   final PNCounterMap m0 = PNCounterMap.create();
   final PNCounterMap m1 = m0.increment(node, "a", 7);
   final PNCounterMap m2 = m1.decrement(node, "a", 2);
   final PNCounterMap m3 = m2.increment(node, "b", 1);
   System.out.println(m3.get("a")); // 5
   System.out.println(m3.getEntries());
   // #pncountermap
 }
Example #9
  @Test
  public void demonstrateUpdateWithRequestContext() {
    probe = new JavaTestKit(system);

    // #update-request-context
    final Cluster node = Cluster.get(system);
    final ActorRef replicator = DistributedData.get(system).replicator();

    final WriteConsistency writeTwo = new WriteTo(2, Duration.create(3, SECONDS));
    final Key<PNCounter> counter1Key = PNCounterKey.create("counter1");

    receive(
        ReceiveBuilder.match(
                String.class,
                a -> a.equals("increment"),
                a -> {
                  // incoming command to increase the counter
                  Optional<Object> reqContext = Optional.of(sender());
                  Replicator.Update<PNCounter> upd =
                      new Replicator.Update<PNCounter>(
                          counter1Key,
                          PNCounter.create(),
                          writeTwo,
                          reqContext,
                          curr -> curr.increment(node, 1));
                  replicator.tell(upd, self());
                })
            .match(
                UpdateSuccess.class,
                a -> a.key().equals(counter1Key),
                a -> {
                  ActorRef replyTo = (ActorRef) a.getRequest().get();
                  replyTo.tell("ack", self());
                })
            .match(
                UpdateTimeout.class,
                a -> a.key().equals(counter1Key),
                a -> {
                  ActorRef replyTo = (ActorRef) a.getRequest().get();
                  replyTo.tell("nack", self());
                })
            .build());

    // #update-request-context
  }
Example #10
  public void demonstrateLWWRegisterWithCustomClock() {
    // #lwwregister-custom-clock

    final Cluster node = Cluster.get(system);
    final LWWRegister.Clock<Record> recordClock =
        new LWWRegister.Clock<Record>() {
          @Override
          public long apply(long currentTimestamp, Record value) {
            return value.version;
          }
        };
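    // Note added for clarity (not in the original sample): a custom clock like
    // recordClock is typically supplied when the register is created or updated,
    // so that merge resolves conflicts by Record.version rather than wall-clock time.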

    final Record record1 = new Record(1, "Alice", "Union Square");
    final LWWRegister<Record> r1 = LWWRegister.create(node, record1);

    final Record record2 = new Record(2, "Alice", "Madison Square");
    final LWWRegister<Record> r2 = LWWRegister.create(node, record2);

    final LWWRegister<Record> r3 = r1.merge(r2);
    System.out.println(r3.value());
    // #lwwregister-custom-clock

    assertEquals("Madison Square", r3.value().address);
  }
Example #11
/**
 * Listens for a neural network model to save to disk.
 *
 * @author Adam Gibson
 */
public class ModelSavingActor extends UntypedActor {

  public static final String SAVE = "save";
  private String pathToSave;
  private ActorRef mediator = DistributedPubSubExtension.get(getContext().system()).mediator();
  private LoggingAdapter log = Logging.getLogger(getContext().system(), this);
  private Cluster cluster = Cluster.get(context().system());
  private ModelSaver modelSaver = new DefaultModelSaver();
  private StateTracker<Updateable<?>> stateTracker;

  public ModelSavingActor(String pathToSave, StateTracker<Updateable<?>> stateTracker) {
    this.pathToSave = pathToSave;
    modelSaver = new DefaultModelSaver(new File(pathToSave));
    this.stateTracker = stateTracker;
  }

  public ModelSavingActor(ModelSaver saver, StateTracker<Updateable<?>> stateTracker) {
    this.modelSaver = saver;
    this.stateTracker = stateTracker;
  }

  {
    // subscribe to model save requests
    mediator.tell(new DistributedPubSubMediator.Subscribe(SAVE, getSelf()), getSelf());
    // subscribe to shutdown messages
    mediator.tell(
        new DistributedPubSubMediator.Subscribe(MasterActor.SHUTDOWN, getSelf()), getSelf());
  }

  @Override
  public void postStop() throws Exception {
    super.postStop();

    log.info("Post stop on model saver");
    cluster.unsubscribe(getSelf());
  }

  @Override
  public void preStart() throws Exception {
    super.preStart();
    log.info("Pre start on model saver");
  }

  @Override
  @SuppressWarnings("unchecked")
  public void onReceive(final Object message) throws Exception {
    if (message instanceof MoreWorkMessage) {
      // persist the current model whenever more work is dispatched
      if (stateTracker.getCurrent() != null
          && stateTracker.getCurrent().getClass().isAssignableFrom(UpdateableImpl.class)) {
        BaseMultiLayerNetwork current = (BaseMultiLayerNetwork) stateTracker.getCurrent().get();
        if (current.getLayers() == null || current.getSigmoidLayers() == null)
          throw new IllegalStateException("Invalid model found when prompted to save.");
        current.clearInput();
        stateTracker.setCurrent(new UpdateableImpl(current));
        if (stateTracker.hasBegun()) modelSaver.save(current);
      } else if (stateTracker
          .getCurrent()
          .get()
          .getClass()
          .isAssignableFrom(DeepAutoEncoder.class)) {
        DeepAutoEncoder current = (DeepAutoEncoder) stateTracker.getCurrent().get();
        stateTracker.setCurrent(new UpdateableEncoderImpl(current));
        if (stateTracker.hasBegun()) modelSaver.save(current);
      }

    } else if (message instanceof DistributedPubSubMediator.UnsubscribeAck
        || message instanceof DistributedPubSubMediator.SubscribeAck) {
      // reply
      mediator.tell(
          new DistributedPubSubMediator.Publish(ClusterListener.TOPICS, message), getSelf());
      log.info("Sending sub/unsub over");
    } else unhandled(message);
  }
}
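A hypothetical wiring sketch, not from the original source: how this actor might be started inside an existing ActorSystem. The variable names system and tracker, the file name, and the actor name are assumptions.

  // illustrative only: "system", "tracker" and the path are assumed to exist
  ActorRef modelSaver =
      system.actorOf(
          Props.create(ModelSavingActor.class, "model-saver.bin", tracker), "model-saver");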
Example #12
  @Test
  public void demonstrateUpdate() {
    probe = new JavaTestKit(system);

    // #update
    final Cluster node = Cluster.get(system);
    final ActorRef replicator = DistributedData.get(system).replicator();

    final Key<PNCounter> counter1Key = PNCounterKey.create("counter1");
    final Key<GSet<String>> set1Key = GSetKey.create("set1");
    final Key<ORSet<String>> set2Key = ORSetKey.create("set2");
    final Key<Flag> activeFlagKey = FlagKey.create("active");

    replicator.tell(
        new Replicator.Update<PNCounter>(
            counter1Key,
            PNCounter.create(),
            Replicator.writeLocal(),
            curr -> curr.increment(node, 1)),
        self());

    final WriteConsistency writeTo3 = new WriteTo(3, Duration.create(1, SECONDS));
    replicator.tell(
        new Replicator.Update<GSet<String>>(
            set1Key, GSet.create(), writeTo3, curr -> curr.add("hello")),
        self());

    final WriteConsistency writeMajority = new WriteMajority(Duration.create(5, SECONDS));
    replicator.tell(
        new Replicator.Update<ORSet<String>>(
            set2Key, ORSet.create(), writeMajority, curr -> curr.add(node, "hello")),
        self());

    final WriteConsistency writeAll = new WriteAll(Duration.create(5, SECONDS));
    replicator.tell(
        new Replicator.Update<Flag>(
            activeFlagKey, Flag.create(), writeAll, curr -> curr.switchOn()),
        self());
    // #update

    probe.expectMsgClass(UpdateSuccess.class);
    // #update-response1
    receive(
        ReceiveBuilder.match(
                UpdateSuccess.class,
                a -> a.key().equals(counter1Key),
                a -> {
                  // ok
                })
            .build());
    // #update-response1

    // #update-response2
    receive(
        ReceiveBuilder.match(
                UpdateSuccess.class,
                a -> a.key().equals(set1Key),
                a -> {
                  // ok
                })
            .match(
                UpdateTimeout.class,
                a -> a.key().equals(set1Key),
                a -> {
                  // write to 3 nodes failed within 1.second
                })
            .build());
    // #update-response2
  }
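A hedged follow-on sketch, not part of the original test: reading counter1 back through the replicator with Replicator.Get. The response handling mirrors the update examples above but should be treated as an assumption.

  public void demonstrateGetSketch() {
    final ActorRef replicator = DistributedData.get(system).replicator();
    final Key<PNCounter> counter1Key = PNCounterKey.create("counter1");

    // ask the local replica for the current value
    replicator.tell(
        new Replicator.Get<PNCounter>(counter1Key, Replicator.readLocal()), self());

    receive(
        ReceiveBuilder.match(
                GetSuccess.class,
                a -> a.key().equals(counter1Key),
                a -> {
                  // current value of the counter on this node
                  PNCounter counter = (PNCounter) a.dataValue();
                  System.out.println(counter.getValue());
                })
            .match(
                NotFound.class,
                a -> a.key().equals(counter1Key),
                a -> {
                  // counter1 has not been written yet
                })
            .build());
  }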