@Test
  public void testClientListenerDisconnected() throws InterruptedException {
    Config config = new Config();
    config.setProperty(GroupProperties.PROP_IO_THREAD_COUNT, "1");

    final HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
    final HazelcastInstance hz2 = Hazelcast.newHazelcastInstance(config);

    int clientCount = 10;
    ClientDisconnectedListenerLatch listenerLatch =
        new ClientDisconnectedListenerLatch(2 * clientCount);
    hz.getClientService().addClientListener(listenerLatch);
    hz2.getClientService().addClientListener(listenerLatch);

    Collection<HazelcastInstance> clients = new LinkedList<HazelcastInstance>();
    for (int i = 0; i < clientCount; i++) {
      HazelcastInstance client = HazelcastClient.newHazelcastClient();
      IMap<Object, Object> map = client.getMap(randomMapName());

      map.addEntryListener(new EntryAdapter<Object, Object>(), true);
      map.put(generateKeyOwnedBy(hz), "value");
      map.put(generateKeyOwnedBy(hz2), "value");

      clients.add(client);
    }

    ExecutorService ex = Executors.newFixedThreadPool(4);
    try {
      for (final HazelcastInstance client : clients) {
        ex.execute(
            new Runnable() {
              @Override
              public void run() {
                client.shutdown();
              }
            });
      }

      assertOpenEventually(listenerLatch, 30);

      assertTrueEventually(
          new AssertTask() {
            @Override
            public void run() throws Exception {
              assertEquals(0, hz.getClientService().getConnectedClients().size());
            }
          },
          10);
      assertTrueEventually(
          new AssertTask() {
            @Override
            public void run() throws Exception {
              assertEquals(0, hz2.getClientService().getConnectedClients().size());
            }
          },
          10);
    } finally {
      ex.shutdown();
    }
  }
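ClientDisconnectedListenerLatch is a helper of this test class and is not shown above; a minimal sketch, assuming it simply extends CountDownLatch, implements com.hazelcast.core.ClientListener, and counts down once per disconnect event (10 clients times 2 members = 20 counts):

  // Hedged sketch of the assumed helper: one countDown() per clientDisconnected event.
  static class ClientDisconnectedListenerLatch extends CountDownLatch implements ClientListener {

    ClientDisconnectedListenerLatch(int count) {
      super(count);
    }

    @Override
    public void clientConnected(Client client) {
      // connect events are not relevant to this test
    }

    @Override
    public void clientDisconnected(Client client) {
      countDown();
    }
  }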
  @Repeat(25)
  @Test
  public void testEachConnectionUseDifferentSelectorEventually() {
    Config config = new Config();
    config.setProperty(GroupProperties.PROP_IO_BALANCER_INTERVAL_SECONDS, "1");
    config.setProperty(GroupProperties.PROP_IO_THREAD_COUNT, "2");

    HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance instance2 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance instance3 = Hazelcast.newHazelcastInstance(config);

    instance2.shutdown();
    instance2 = Hazelcast.newHazelcastInstance(config);

    IMap<Integer, Integer> map = instance1.getMap(randomMapName());

    long deadLine = System.currentTimeMillis() + TEST_DURATION_SECONDS * 1000;
    for (int i = 0; System.currentTimeMillis() < deadLine; i++) {
      map.put(i % 1000, i);
    }

    assertBalanced(instance1);
    assertBalanced(instance2);
    assertBalanced(instance3);
  }
  @Test
  public void testOperationNotBlockingAfterClusterShutdown() throws InterruptedException {
    final HazelcastInstance instance1 = Hazelcast.newHazelcastInstance();
    final HazelcastInstance instance2 = Hazelcast.newHazelcastInstance();

    final ClientConfig clientConfig = new ClientConfig();
    clientConfig.getNetworkConfig().setConnectionAttemptLimit(Integer.MAX_VALUE);
    final HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
    final IMap<String, String> m = client.getMap("m");

    m.put("elif", "Elif");
    m.put("ali", "Ali");
    m.put("alev", "Alev");

    instance1.getLifecycleService().terminate();
    instance2.getLifecycleService().terminate();

    final CountDownLatch latch = new CountDownLatch(1);
    new Thread() {
      public void run() {
        try {
          m.get("ali");
        } catch (Exception ignored) {
          latch.countDown();
        }
      }
    }.start();

    assertOpenEventually(latch);
  }
 @BeforeClass
 public static void init() {
   instance1 = Hazelcast.newHazelcastInstance();
   instance2 = Hazelcast.newHazelcastInstance();
   instance3 = Hazelcast.newHazelcastInstance();
   client = HazelcastClient.newHazelcastClient();
 }
 @Test
 public void testWANClusteringActivePassive() throws Exception {
   Config c1 = new Config();
   Config c2 = new Config();
   c1.getGroupConfig().setName("newyork");
   c1.addWanReplicationConfig(
       new WanReplicationConfig()
           .setName("my-wan")
           .addTargetClusterConfig(
               new WanTargetClusterConfig().addEndpoint("127.0.0.1:5702").setGroupName("london")));
   c1.getMapConfig("default")
       .setWanReplicationRef(
           new WanReplicationRef().setName("my-wan").setMergePolicy(PassThroughMergePolicy.NAME));
   c2.getGroupConfig().setName("london");
   c2.getMapConfig("default")
       .setWanReplicationRef(
           new WanReplicationRef().setName("my-wan").setMergePolicy(PassThroughMergePolicy.NAME));
   HazelcastInstance h10 = Hazelcast.newHazelcastInstance(c1);
   HazelcastInstance h20 = Hazelcast.newHazelcastInstance(c2);
   int size = 1000;
   MergeLatch mergeLatch2 = new MergeLatch(size);
   getConcurrentMapManager(h20).addWanMergeListener(mergeLatch2);
   for (int i = 0; i < size; i++) {
     h10.getMap("default").put(i, "value" + i);
   }
   assertTrue("Latch state: " + mergeLatch2, mergeLatch2.await(60, TimeUnit.SECONDS));
   Thread.sleep(1000);
   assertEquals(size, mergeLatch2.totalOperations());
   assertEquals(size, h10.getMap("default").size());
   assertEquals(size, h20.getMap("default").size());
   for (int i = 0; i < size; i++) {
     assertEquals("value" + i, h20.getMap("default").get(i));
   }
 }
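The configuration above is active-passive: only the newyork cluster publishes to london. If the scenario were extended to active-active, c2 would carry a mirror-image WAN configuration pointing back at newyork, using only the API calls already shown above (the 127.0.0.1:5701 endpoint is an assumption about where h10 listens):

   // Hypothetical mirror configuration for an active-active WAN setup (endpoint assumed):
   c2.addWanReplicationConfig(
       new WanReplicationConfig()
           .setName("my-wan")
           .addTargetClusterConfig(
               new WanTargetClusterConfig()
                   .addEndpoint("127.0.0.1:5701")
                   .setGroupName("newyork")));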
  @Test
  public void testLatestAccessCacheMergePolicy() {
    String cacheName = randomMapName();
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);

    TestMemberShipListener memberShipListener = new TestMemberShipListener(1);
    h2.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h2.getLifecycleService().addLifecycleListener(lifeCycleListener);

    closeConnectionBetween(h1, h2);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(1, h1);
    assertClusterSizeEventually(1, h2);

    CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1);
    CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2);

    CacheManager cacheManager1 = cachingProvider1.getCacheManager();
    CacheManager cacheManager2 = cachingProvider2.getCacheManager();

    CacheConfig cacheConfig =
        newCacheConfig(cacheName, LatestAccessCacheMergePolicy.class.getName());

    Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig);
    Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig);

    // TODO: We assume the cluster is still split at this point and during the get/put calls below.
    // This assumption seems fragile due to time sensitivity.

    cache1.put("key1", "value");
    assertEquals("value", cache1.get("key1")); // Access to record

    // Prevent updating at the same time
    sleepAtLeastMillis(1);

    cache2.put("key1", "LatestUpdatedValue");
    assertEquals("LatestUpdatedValue", cache2.get("key1")); // Access to record

    cache2.put("key2", "value2");
    assertEquals("value2", cache2.get("key2")); // Access to record

    // Prevent updating at the same time
    sleepAtLeastMillis(1);

    cache1.put("key2", "LatestUpdatedValue2");
    assertEquals("LatestUpdatedValue2", cache1.get("key2")); // Access to record

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);

    Cache cacheTest = cacheManager1.getCache(cacheName);
    assertEquals("LatestUpdatedValue", cacheTest.get("key1"));
    assertEquals("LatestUpdatedValue2", cacheTest.get("key2"));
  }
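TestLifeCycleListener and TestMemberShipListener are helpers used by the split-brain tests in this listing; a plausible sketch, assuming they latch on LifecycleState.MERGED and on memberRemoved respectively:

  static class TestLifeCycleListener implements LifecycleListener {
    final CountDownLatch latch;

    TestLifeCycleListener(int countdown) {
      latch = new CountDownLatch(countdown);
    }

    @Override
    public void stateChanged(LifecycleEvent event) {
      // Assumed behaviour: the latch opens once the split clusters have merged back together.
      if (event.getState() == LifecycleEvent.LifecycleState.MERGED) {
        latch.countDown();
      }
    }
  }

  static class TestMemberShipListener implements MembershipListener {
    final CountDownLatch latch;

    TestMemberShipListener(int countdown) {
      latch = new CountDownLatch(countdown);
    }

    @Override
    public void memberAdded(MembershipEvent event) {
      // only member removals are counted
    }

    @Override
    public void memberRemoved(MembershipEvent event) {
      latch.countDown();
    }

    @Override
    public void memberAttributeChanged(MemberAttributeEvent event) {
    }
  }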
  @Test
  public void testIndexDoesNotReturnStaleResultsAfterSplit() {
    String mapName = randomMapName();
    Config config = newConfig(LatestUpdateMapMergePolicy.class.getName(), mapName);
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);

    TestMembershipListener membershipListener = new TestMembershipListener(1);
    h2.getCluster().addMembershipListener(membershipListener);
    TestLifecycleListener lifecycleListener = new TestLifecycleListener(1);
    h2.getLifecycleService().addLifecycleListener(lifecycleListener);

    RealtimeCall call = new RealtimeCall();
    String key = generateKeyOwnedBy(h1);
    call.setId(key);
    call.setClusterUUID(key);

    IMap<PartitionAwareKey<String, String>, RealtimeCall> map1 = h1.getMap(mapName);
    IMap<PartitionAwareKey<String, String>, RealtimeCall> map2 = h2.getMap(mapName);

    map1.put(call.getAffinityKey(), call);

    sleepMillis(1);
    assertNotNull("entry should be in map2 before split", map2.get(call.getAffinityKey()));

    closeConnectionBetween(h1, h2);

    assertOpenEventually(membershipListener.latch);
    assertClusterSizeEventually(1, h1);
    assertClusterSizeEventually(1, h2);

    map1 = h1.getMap(mapName);
    map1.remove(call.getAffinityKey());

    sleepMillis(1);
    map2 = h2.getMap(mapName);
    assertNotNull("entry should be in map2 in split", map2.get(call.getAffinityKey()));

    assertOpenEventually(lifecycleListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);

    map1 = h1.getMap(mapName);
    assertNotNull("entry should be in map1", map1.get(call.getAffinityKey()));

    map1.remove(call.getAffinityKey());
    assertNull("map1 should be null", map1.get(call.getAffinityKey()));
    assertNull("map2 should be null", map2.get(call.getAffinityKey()));

    for (int i = 0; i < 100; i++) {
      Collection<RealtimeCall> calls = map1.values(Predicates.equal("id", call.getId()));
      System.out.println("Map 1 query by id: " + calls.size());
      assertEquals(0, calls.size());
      calls = map2.values(Predicates.equal("id", call.getId()));
      System.out.println("Map 2 query by id: " + calls.size());
      assertEquals(0, calls.size());
      sleepMillis(5);
    }
  }
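newConfig(mergePolicy, mapName) is another helper that is not shown; a sketch under the assumption that it wires the merge policy into the map config and shortens the split-brain merge delays so the test completes quickly (the property values are assumptions):

  private Config newConfig(String mergePolicy, String mapName) {
    Config config = new Config();
    // Assumed: run the merge process shortly after the split heals.
    config.setProperty(GroupProperties.PROP_MERGE_FIRST_RUN_DELAY_SECONDS, "5");
    config.setProperty(GroupProperties.PROP_MERGE_NEXT_RUN_DELAY_SECONDS, "3");
    config.getMapConfig(mapName).setMergePolicy(mergePolicy);
    return config;
  }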
Example #8
  @Test
  public void testSwitchingMasters() throws Exception {
    Config c1 = buildConfig(false);
    Config c2 = buildConfig(false);
    Config c3 = buildConfig(false);
    Config c4 = buildConfig(false);
    Config c5 = buildConfig(false);

    c1.getNetworkConfig().setPort(55701);
    c2.getNetworkConfig().setPort(55702);
    c3.getNetworkConfig().setPort(55703);
    c4.getNetworkConfig().setPort(55704);
    c5.getNetworkConfig().setPort(55705);

    List<String> allMembers =
        Arrays.asList(
            "127.0.0.1:55701",
            "127.0.0.1:55702",
            "127.0.0.1:55703",
            "127.0.0.1:55704",
            "127.0.0.1:55705");
    c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
    c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
    c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
    c4.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
    c5.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);

    final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
    final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
    final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);
    final HazelcastInstance h4 = Hazelcast.newHazelcastInstance(c4);
    final HazelcastInstance h5 = Hazelcast.newHazelcastInstance(c5);

    assertEquals(5, h1.getCluster().getMembers().size());
    assertEquals(5, h2.getCluster().getMembers().size());
    assertEquals(5, h3.getCluster().getMembers().size());
    assertEquals(5, h4.getCluster().getMembers().size());
    assertEquals(5, h5.getCluster().getMembers().size());

    // Need to wait for at least as long as PROP_MAX_NO_MASTER_CONFIRMATION_SECONDS
    Thread.sleep(15 * 1000);
    h1.getLifecycleService().shutdown();
    Thread.sleep(3 * 1000);

    assertEquals(4, h2.getCluster().getMembers().size());
    assertEquals(4, h3.getCluster().getMembers().size());
    assertEquals(4, h4.getCluster().getMembers().size());
    assertEquals(4, h5.getCluster().getMembers().size());

    Thread.sleep(10 * 1000);

    assertEquals(4, h2.getCluster().getMembers().size());
    assertEquals(4, h3.getCluster().getMembers().size());
    assertEquals(4, h4.getCluster().getMembers().size());
    assertEquals(4, h5.getCluster().getMembers().size());
  }
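buildConfig(boolean) is not shown in this listing; a reasonable sketch, assuming the flag toggles multicast versus TCP/IP joining and that the master-confirmation window referenced in the comment above is shortened for the test:

  private static Config buildConfig(boolean multicastEnabled) {
    Config config = new Config();
    // Assumed value: keep the no-master-confirmation window short so the test does not run long.
    config.setProperty(GroupProperties.PROP_MAX_NO_MASTER_CONFIRMATION_SECONDS, "10");
    JoinConfig join = config.getNetworkConfig().getJoin();
    join.getMulticastConfig().setEnabled(multicastEnabled);
    join.getTcpIpConfig().setEnabled(!multicastEnabled);
    return config;
  }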
 @Test(expected = RuntimeException.class, timeout = 120000)
 public void testFailingSocketInterceptor() {
   Config config = new Config();
   config.setProperty(GroupProperties.PROP_MAX_JOIN_SECONDS, "3");
   SocketInterceptorConfig sic = new SocketInterceptorConfig();
   MySocketInterceptor mySocketInterceptor = new MySocketInterceptor(false);
   // Enable the interceptor config explicitly; it is disabled by default.
   sic.setEnabled(true);
   sic.setImplementation(mySocketInterceptor);
   config.getNetworkConfig().setSocketInterceptorConfig(sic);
   HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
   HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
 }
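MySocketInterceptor is also a test helper; a sketch, assuming it implements com.hazelcast.nio.MemberSocketInterceptor and that the boolean constructor flag decides whether connections are allowed (false makes every join attempt fail, which is what the expected RuntimeException relies on):

 static class MySocketInterceptor implements MemberSocketInterceptor {
   private final boolean successful;

   MySocketInterceptor(boolean successful) {
     this.successful = successful;
   }

   @Override
   public void init(Properties properties) {
   }

   @Override
   public void onAccept(Socket acceptedSocket) throws IOException {
     if (!successful) {
       throw new IOException("Rejecting accepted connection (intentional test failure)");
     }
   }

   @Override
   public void onConnect(Socket connectedSocket) throws IOException {
     if (!successful) {
       throw new IOException("Failing outgoing connection (intentional test failure)");
     }
   }
 }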
  @Test
  public void testSemaphoreSplitBrain() throws InterruptedException {
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
    final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
    final String key = generateKeyOwnedBy(h3);
    ISemaphore semaphore = h3.getSemaphore(key);
    semaphore.init(5);
    semaphore.acquire(3);
    assertEquals(2, semaphore.availablePermits());

    assertTrueEventually(
        new AssertTask() {
          @Override
          public void run() throws Exception {
            assertTrue(h3.getPartitionService().isLocalMemberSafe());
          }
        });

    TestMemberShipListener memberShipListener = new TestMemberShipListener(2);
    h3.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h3.getLifecycleService().addLifecycleListener(lifeCycleListener);

    closeConnectionBetween(h1, h3);
    closeConnectionBetween(h2, h3);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);
    assertClusterSizeEventually(1, h3);

    final ISemaphore semaphore1 = h1.getSemaphore(key);
    // When a member goes down, its permits are released.
    // Since releasing the permits is asynchronous, we assert eventually.
    assertTrueEventually(
        new AssertTask() {
          @Override
          public void run() throws Exception {
            assertEquals(5, semaphore1.availablePermits());
          }
        });
    semaphore1.acquire(4);

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(3, h1);
    assertClusterSizeEventually(3, h2);
    assertClusterSizeEventually(3, h3);

    ISemaphore testSemaphore = h3.getSemaphore(key);
    assertEquals(1, testSemaphore.availablePermits());
  }
Example #11
  /*
   * Sets up a situation where the member list is out of order on node2. Both
   * node1 and node2 think they are the master, and each thinks the other is a
   * member of its own cluster.
   */
  @Test
  public void testOutOfSyncMemberListTwoMasters() throws Exception {
    Config c1 = buildConfig(false);
    Config c2 = buildConfig(false);
    Config c3 = buildConfig(false);

    c1.getNetworkConfig().setPort(45701);
    c2.getNetworkConfig().setPort(45702);
    c3.getNetworkConfig().setPort(45703);

    List<String> allMembers =
        Arrays.asList("127.0.0.1:45701", "127.0.0.1:45702", "127.0.0.1:45703");
    c1.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
    c2.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);
    c3.getNetworkConfig().getJoin().getTcpIpConfig().setMembers(allMembers);

    final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(c1);
    final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(c2);
    final HazelcastInstance h3 = Hazelcast.newHazelcastInstance(c3);

    final MemberImpl m1 = (MemberImpl) h1.getCluster().getLocalMember();
    final MemberImpl m2 = (MemberImpl) h2.getCluster().getLocalMember();
    final MemberImpl m3 = (MemberImpl) h3.getCluster().getLocalMember();

    // All three nodes join into one cluster
    assertEquals(3, h1.getCluster().getMembers().size());
    assertEquals(3, h2.getCluster().getMembers().size());
    assertEquals(3, h3.getCluster().getMembers().size());

    final Node n2 = TestUtil.getNode(h2);

    // Simulate node2 receiving an out-of-order member list, which causes node2 to
    // think it is the master.
    List<MemberInfo> members = new ArrayList<MemberInfo>();
    members.add(new MemberInfo(m2.getAddress(), m2.getUuid()));
    members.add(new MemberInfo(m3.getAddress(), m3.getUuid()));
    members.add(new MemberInfo(m1.getAddress(), m1.getUuid()));
    n2.clusterService.updateMembers(members);
    n2.setMasterAddress(m2.getAddress());

    // Give the cluster some time to figure things out. The merge and heartbeat code should have
    // kicked in by this point
    Thread.sleep(30 * 1000);

    assertEquals(m1, h1.getCluster().getMembers().iterator().next());
    assertEquals(m1, h2.getCluster().getMembers().iterator().next());
    assertEquals(m1, h3.getCluster().getMembers().iterator().next());

    assertEquals(3, h1.getCluster().getMembers().size());
    assertEquals(3, h2.getCluster().getMembers().size());
    assertEquals(3, h3.getCluster().getMembers().size());
  }
  @Test
  public void testQueueSplitBrain() throws InterruptedException {
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h3 = Hazelcast.newHazelcastInstance(config);
    final String name = generateKeyOwnedBy(h1);
    IQueue<Object> queue = h1.getQueue(name);

    TestMemberShipListener memberShipListener = new TestMemberShipListener(2);
    h3.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h3.getLifecycleService().addLifecycleListener(lifeCycleListener);

    for (int i = 0; i < 100; i++) {
      queue.add("item" + i);
    }

    waitAllForSafeState();

    closeConnectionBetween(h1, h3);
    closeConnectionBetween(h2, h3);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);
    assertClusterSizeEventually(1, h3);

    for (int i = 100; i < 200; i++) {
      queue.add("item" + i);
    }

    IQueue<Object> queue3 = h3.getQueue(name);
    for (int i = 0; i < 50; i++) {
      queue3.add("lostQueueItem" + i);
    }

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(3, h1);
    assertClusterSizeEventually(3, h2);
    assertClusterSizeEventually(3, h3);

    IQueue<Object> testQueue = h1.getQueue(name);
    assertFalse(testQueue.contains("lostQueueItem0"));
    assertFalse(testQueue.contains("lostQueueItem49"));
    assertTrue(testQueue.contains("item0"));
    assertTrue(testQueue.contains("item199"));
    assertTrue(testQueue.contains("item121"));
    assertTrue(testQueue.contains("item45"));
  }
  @Test(timeout = 300000)
  public void testJoinWhenMemberClosedInBetween() throws InterruptedException {
    // The test expects that all members can join safely.
    // In the failing case, the last instance to start throws
    // java.lang.IllegalStateException: Node failed to start!
    Config config = new Config();
    HazelcastInstance i1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance i2 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance i3 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance i4 = Hazelcast.newHazelcastInstance(config);

    final IMap<Integer, Integer> map = i4.getMap("a");
    int numThreads = 40;
    final int loop = 5000;

    Thread[] threads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      threads[i] =
          new Thread(
              new Runnable() {
                public void run() {
                  Random random = new Random();
                  for (int j = 0; j < loop; j++) {
                    int op = random.nextInt(3);
                    if (op == 0) {
                      map.put(j, j);
                    } else if (op == 1) {
                      Integer val = map.remove(j);
                      assert val == null || val.equals(j);
                    } else {
                      Integer val = map.get(j);
                      assert val == null || val.equals(j);
                    }
                  }
                }
              });
      threads[i].start();
    }

    i1.shutdown();
    i2.shutdown();
    i3.shutdown();

    // Should not throw java.lang.IllegalStateException: Node failed to start!
    Hazelcast.newHazelcastInstance(config);

    for (int i = 0; i < numThreads; i++) {
      threads[i].join();
    }
  }
  @BeforeClass
  public static void setUp() {
    Config config = new Config();
    config.setProperty("hazelcast.partition.count", "" + PARTITION_COUNT);

    instance = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance remoteInstance = Hazelcast.newHazelcastInstance(config);
    warmUpPartitions(instance, remoteInstance);

    ClientConfig clientconfig = new ClientConfig();
    clientconfig.setProperty("hazelcast.partition.count", "" + PARTITION_COUNT);

    client = HazelcastClient.newHazelcastClient(clientconfig);
  }
Example #15
 @Test
 @Ignore
 public void testCallState() throws Exception {
   Config config = new Config();
   final HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
   final HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);
   final Node node1 = getNode(h1);
   final Node node2 = getNode(h2);
   Thread.sleep(100);
   final CountDownLatch latch = new CountDownLatch(1);
   final IMap imap1 = h1.getMap("default");
   new Thread(
           new Runnable() {
             public void run() {
               imap1.lock("1");
               latch.countDown();
             }
           })
       .start();
   latch.await();
   //        final IMap imap2 = h2.getMap("default");
   final AtomicInteger threadId = new AtomicInteger();
   new Thread(
           new Runnable() {
             public void run() {
               ThreadContext.get().setCurrentFactory(node1.factory);
               threadId.set(ThreadContext.get().getThreadId());
               imap1.put("1", "value1");
             }
           })
       .start();
   Thread.sleep(1000);
   System.out.println(node1.getThisAddress() + " thread " + threadId.get());
   CallState callState1 =
       node1.getSystemLogService().getCallState(node1.getThisAddress(), threadId.get());
   if (callState1 != null) {
     for (Object callStateLog : callState1.getLogs()) {
       System.out.println(callStateLog);
     }
   }
   CallState callState2 =
       node2.getSystemLogService().getCallState(node1.getThisAddress(), threadId.get());
   System.out.println("========================");
   if (callState2 != null) {
     for (Object callStateLog : callState2.getLogs()) {
       System.out.println(callStateLog);
     }
   }
 }
  @Test
  public void testNumberOfClients_afterUnAuthenticatedClient_withTwoNode() {
    final HazelcastInstance instance1 = Hazelcast.newHazelcastInstance();
    final HazelcastInstance instance2 = Hazelcast.newHazelcastInstance();
    final ClientConfig clientConfig = new ClientConfig();
    clientConfig.getGroupConfig().setPassword("wrongPassword");

    try {
      HazelcastClient.newHazelcastClient(clientConfig);
    } catch (IllegalStateException ignored) {

    }

    assertEquals(0, instance1.getClientService().getConnectedClients().size());
    assertEquals(0, instance2.getClientService().getConnectedClients().size());
  }
 @Test
 public void testTransactionAtomicity_whenMultiMapGetIsUsed_withTransaction()
     throws InterruptedException {
   final HazelcastInstance hz = Hazelcast.newHazelcastInstance(createConfigWithDummyTxService());
   final String name = HazelcastTestSupport.generateRandomString(5);
   Thread producerThread = startProducerThread(hz, name);
   try {
     IQueue<String> q = hz.getQueue(name);
     for (int i = 0; i < 1000; i++) {
       String id = q.poll();
       if (id != null) {
         TransactionContext tx = hz.newTransactionContext();
         try {
           tx.beginTransaction();
           TransactionalMultiMap<Object, Object> multiMap = tx.getMultiMap(name);
           Collection<Object> values = multiMap.get(id);
           assertFalse(values.isEmpty());
           multiMap.remove(id);
           tx.commitTransaction();
         } catch (TransactionException e) {
           tx.rollbackTransaction();
           e.printStackTrace();
         }
       } else {
         LockSupport.parkNanos(100);
       }
     }
   } finally {
     stopProducerThread(producerThread);
   }
 }
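startProducerThread and stopProducerThread are helpers of this test class; a sketch under the assumption that the producer fills the MultiMap and publishes the id on the queue inside a single transaction (the atomicity the consumer loop above asserts), and that a shared flag stops it:

 private final AtomicBoolean producerRunning = new AtomicBoolean(true);

 private Thread startProducerThread(final HazelcastInstance hz, final String name) {
   Thread producer =
       new Thread(
           new Runnable() {
             public void run() {
               while (producerRunning.get()) {
                 String id = UUID.randomUUID().toString();
                 TransactionContext tx = hz.newTransactionContext();
                 tx.beginTransaction();
                 try {
                   // Fill the MultiMap first, then publish the id, all in one transaction.
                   tx.getMultiMap(name).put(id, id);
                   tx.getQueue(name).offer(id);
                   tx.commitTransaction();
                 } catch (Exception e) {
                   tx.rollbackTransaction();
                 }
               }
             }
           });
   producer.start();
   return producer;
 }

 private void stopProducerThread(Thread producerThread) throws InterruptedException {
   producerRunning.set(false);
   producerThread.join();
 }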
Example #18
  @Test
  public void testInitDNodeList() throws Throwable {
    SploutConfiguration config = SploutConfiguration.getTestConfig();
    QNodeHandler handler = new QNodeHandler();
    try {
      HazelcastInstance hz = Hazelcast.newHazelcastInstance(HazelcastConfigBuilder.build(config));
      CoordinationStructures coord = new CoordinationStructures(hz);

      SploutConfiguration dNodeConfig = SploutConfiguration.getTestConfig();
      dNodeConfig.setProperty(DNodeProperties.PORT, 1000);

      coord.getDNodes().put("/localhost:1000", new DNodeInfo(dNodeConfig));

      dNodeConfig = SploutConfiguration.getTestConfig();
      dNodeConfig.setProperty(DNodeProperties.PORT, 1001);

      coord.getDNodes().put("/localhost:1001", new DNodeInfo(dNodeConfig));

      try {
        handler.init(config);
      } catch (Exception e) {
        // Since the handler will try to connect to "localhost:1000", we swallow the exception
        // and continue; the things we want to assert should be present anyway.
      }
      Assert.assertEquals(
          2, handler.getContext().getCoordinationStructures().getDNodes().values().size());
    } finally {
      handler.close();
      Hazelcast.shutdownAll();
    }
  }
  @Test(expected = NullPointerException.class)
  public void testRemoveClientListener_whenIdIsNull() {
    HazelcastInstance instance = Hazelcast.newHazelcastInstance();

    ClientService clientService = instance.getClientService();
    clientService.removeClientListener(null);
  }
  @Test
  public void putFromMultipleThreads() throws InterruptedException {
    final HazelcastInstance h = Hazelcast.newHazelcastInstance(null);
    final AtomicInteger counter = new AtomicInteger(0);
    class Putter implements Runnable {
      volatile boolean run = true;

      public void run() {
        HazelcastClient hClient = TestUtility.newHazelcastClient(h);
        while (run) {
          Map<String, String> clientMap = hClient.getMap("putFromMultipleThreads");
          clientMap.put(String.valueOf(counter.incrementAndGet()), String.valueOf(counter.get()));
        }
      }
    }
    List<Putter> list = new ArrayList<Putter>();
    for (int i = 0; i < 10; i++) {
      Putter p = new Putter();
      list.add(p);
      new Thread(p).start();
    }
    Thread.sleep(5000);
    for (Iterator<Putter> it = list.iterator(); it.hasNext(); ) {
      Putter p = it.next();
      p.run = false;
    }
    Thread.sleep(100);
    assertEquals(counter.get(), h.getMap("putFromMultipleThreads").size());
  }
    public static void main(String[] args) throws Exception {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        Cluster cluster = hz.getCluster();

        cluster.addMembershipListener(new MembershipListener() {
            @Override
            public void memberAdded(MembershipEvent membershipEvent) {
                System.out.println( "********** MemberAdded " + membershipEvent );
            }

            @Override
            public void memberRemoved(MembershipEvent membershipEvent) {
                System.out.println( "********** MemberRemoved " + membershipEvent );
            }

            @Override
            public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) {
                System.out.println( "********** MemberAttributeChanged " + memberAttributeEvent );
            }
        });
        Member localMember = cluster.getLocalMember();
        System.out.println("********** my inetAddress= " + localMember.getInetSocketAddress());


    }
  protected HazelcastInstance hazelcastInstance() {
    Config config = new Config();

    config.addCacheConfig(
        new CacheSimpleConfig()
            .setName("result-cache")
            .setInMemoryFormat(InMemoryFormat.BINARY)
            .setEvictionConfig(new EvictionConfig().setEvictionPolicy(EvictionPolicy.LRU))
            .setExpiryPolicyFactoryConfig(
                new CacheSimpleConfig.ExpiryPolicyFactoryConfig(
                    new CacheSimpleConfig.ExpiryPolicyFactoryConfig.TimedExpiryPolicyFactoryConfig(
                        CacheSimpleConfig.ExpiryPolicyFactoryConfig.TimedExpiryPolicyFactoryConfig
                            .ExpiryPolicyType.ACCESSED,
                        new CacheSimpleConfig.ExpiryPolicyFactoryConfig.DurationConfig(
                            20, TimeUnit.MINUTES)))));

    NetworkConfig networkConfig = new NetworkConfig();
    networkConfig.setPort(5712).setPortAutoIncrement(true);

    config.setNetworkConfig(networkConfig);
    config.setGroupConfig(new GroupConfig("ach-validation", "ach-validation"));

    TcpClusterConfigurator.configureNetwork(config.getNetworkConfig());
    TcpClusterConfigurator.configureGroup(config.getGroupConfig());

    return Hazelcast.newHazelcastInstance(config);
  }
  private HazelcastInstance defaultConfig(final ClusteringConfig clusteringConfig) {
    final Config hazelcastConfig = new Config();

    hazelcastConfig.setGroupConfig(
        new GroupConfig(clusteringConfig.getClusterName(), clusteringConfig.getClusterPassword()));

    final NetworkConfig networkConfig = new NetworkConfig();

    networkConfig.setPort(clusteringConfig.getMulticastPort());

    final JoinConfig joinConfig = new JoinConfig();

    final MulticastConfig multicastConfig = new MulticastConfig();

    multicastConfig.setMulticastPort(clusteringConfig.getMulticastPort());

    multicastConfig.setMulticastGroup(clusteringConfig.getMulticastGroup());

    joinConfig.setMulticastConfig(multicastConfig);

    networkConfig.setJoin(joinConfig);

    hazelcastConfig.setNetworkConfig(networkConfig);

    return Hazelcast.newHazelcastInstance(hazelcastConfig);
  }
  public static void main(String[] args) {
    HazelcastInstance hz = Hazelcast.newHazelcastInstance();
    IAtomicLong atomicLong = hz.getAtomicLong("counter");

    atomicLong.set(1);
    long result = atomicLong.apply(new Add2Function());
    System.out.println("apply.result:" + result);
    System.out.println("apply.value:" + atomicLong.get());

    atomicLong.set(1);
    atomicLong.alter(new Add2Function());
    System.out.println("alter.value:" + atomicLong.get());

    atomicLong.set(1);
    result = atomicLong.alterAndGet(new Add2Function());
    System.out.println("alterAndGet.result:" + result);
    System.out.println("alterAndGet.value:" + atomicLong.get());

    atomicLong.set(1);
    result = atomicLong.getAndAlter(new Add2Function());
    System.out.println("getAndAlter.result:" + result);
    System.out.println("getAndAlter.value:" + atomicLong.get());

    System.exit(0);

    // Unreachable at runtime because of the exit above: a manual compareAndSet loop that
    // performs the same "+2" update without an IFunction.
    for (; ; ) {
      long oldValue = atomicLong.get();
      long newValue = oldValue + 2;
      if (atomicLong.compareAndSet(oldValue, newValue)) {
        break;
      }
    }
  }
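Add2Function is not shown above; a minimal sketch, assuming it is a Hazelcast IFunction that simply adds 2 to its input:

  static class Add2Function implements IFunction<Long, Long> {
    @Override
    public Long apply(Long input) {
      // add 2 to the current value of the IAtomicLong
      return input + 2;
    }
  }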
 // public API
 public MyHazelcastInstance(final MapStore store, final String storeType) {
   this.store = store;
   this.storeType = storeType;
   log.info("Creating Hazelcast CEP instance..");
   instanceName = UUID.randomUUID().toString();
   Hazelcast.newHazelcastInstance(getConfig());
   log.info("Created CEP instance.");
 }
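The getConfig() helper used in this constructor is not shown; a sketch, assuming it names the instance and wires the injected MapStore into the default map configuration (the map name, write-delay, and the unused storeType handling are assumptions):

 private Config getConfig() {
   Config config = new Config();
   config.setInstanceName(instanceName);
   MapStoreConfig mapStoreConfig =
       new MapStoreConfig().setEnabled(true).setImplementation(store).setWriteDelaySeconds(0);
   // Attach the injected MapStore to the default map; storeType presumably drives logic not shown here.
   config.getMapConfig("default").setMapStoreConfig(mapStoreConfig);
   return config;
 }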
  @Test
  public void testRemoveClientListener_whenNonExistingId() {
    HazelcastInstance instance = Hazelcast.newHazelcastInstance();

    ClientService clientService = instance.getClientService();

    clientService.removeClientListener("foobar");
  }
  /* github issue #183 */
  @Test
  public void testKeyBasedListeners() throws InterruptedException {
    try {
      Config config = new Config();
      HazelcastInstance instance = Hazelcast.newHazelcastInstance(config);
      IMap<String, String> map = instance.getMap("map");
      map.put("key1", "value1");
      map.put("key2", "value2");
      map.put("key3", "value3");

      ClientConfig clientConfig = new ClientConfig();
      HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);

      final AtomicInteger count = new AtomicInteger(0);
      IMap<String, String> clientMap = client.getMap("map");

      clientMap.addEntryListener(
          new EntryListener<String, String>() {
            public void entryAdded(EntryEvent<String, String> entryEvent) {
              count.incrementAndGet();
            }

            public void entryRemoved(EntryEvent<String, String> entryEvent) {}

            public void entryUpdated(EntryEvent<String, String> entryEvent) {
              count.incrementAndGet();
            }

            public void entryEvicted(EntryEvent<String, String> entryEvent) {}
          },
          "key1",
          true);

      clientMap.addEntryListener(
          new EntryListener<String, String>() {
            public void entryAdded(EntryEvent<String, String> entryEvent) {
              count.incrementAndGet();
            }

            public void entryRemoved(EntryEvent<String, String> entryEvent) {}

            public void entryUpdated(EntryEvent<String, String> entryEvent) {
              System.out.println("event map");
              count.incrementAndGet();
            }

            public void entryEvicted(EntryEvent<String, String> entryEvent) {}
          },
          "key2",
          true);

      map.put("key1", "new-value1");
      Thread.sleep(100);
      Assert.assertEquals(1, count.get());
    } catch (InterruptedException e) {
      e.printStackTrace();
    }
  }
  @Test
  public void testCustomCacheMergePolicy() {
    String cacheName = randomMapName();
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);

    TestMemberShipListener memberShipListener = new TestMemberShipListener(1);
    h2.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h2.getLifecycleService().addLifecycleListener(lifeCycleListener);

    closeConnectionBetween(h1, h2);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(1, h1);
    assertClusterSizeEventually(1, h2);

    CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1);
    CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2);

    CacheManager cacheManager1 = cachingProvider1.getCacheManager();
    CacheManager cacheManager2 = cachingProvider2.getCacheManager();

    CacheConfig cacheConfig = newCacheConfig(cacheName, CustomCacheMergePolicy.class.getName());

    Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig);
    Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig);

    // TODO: We assume the cluster is still split at this point and during the get/put calls below.
    // This assumption seems fragile due to time sensitivity.

    String key = generateKeyOwnedBy(h1);
    cache1.put(key, "value");

    cache2.put(key, Integer.valueOf(1));

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);

    Cache cacheTest = cacheManager2.getCache(cacheName);
    assertNotNull(cacheTest.get(key));
    assertTrue(cacheTest.get(key) instanceof Integer);
  }
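CustomCacheMergePolicy is defined elsewhere in the test class. A sketch, assuming it implements com.hazelcast.cache.CacheMergePolicy and prefers Integer values (which is what the final assertions check), and ignoring any serialization the real helper may declare:

  static class CustomCacheMergePolicy implements CacheMergePolicy {

    @Override
    public Object merge(String cacheName, CacheEntryView mergingEntry, CacheEntryView existingEntry) {
      // Assumed behaviour: keep the merging value only if it is an Integer, otherwise drop it.
      if (mergingEntry.getValue() instanceof Integer) {
        return mergingEntry.getValue();
      }
      return null;
    }
  }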
  @Before
  public void init() {
    Config config = new Config();
    config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
    config.getNetworkConfig().setPort(5701);
    config.getGroupConfig().setName("cluster1");
    config.getGroupConfig().setPassword("cluster1pass");

    final HazelcastInstance hz1 = Hazelcast.newHazelcastInstance(config);

    Config config2 = new Config();
    config2.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
    config2.getNetworkConfig().setPort(5702);
    config2.getGroupConfig().setName("cluster2");
    config2.getGroupConfig().setPassword("cluster2pass");

    final HazelcastInstance hz2 = Hazelcast.newHazelcastInstance(config2);
  }
  /**
   * Create HazelcastInstance bean.
   *
   * @param hazelcastConfigLocation location of the hazelcast xml configuration resource.
   * @param resourceLoader resource loader for loading hazelcast xml configuration resource.
   * @return HazelcastInstance bean.
   * @throws IOException if parsing of hazelcast xml configuration fails
   */
  @Bean
  public HazelcastInstance hazelcast(
      @Value("${hz.config.location:NO_CONFIG_PROVIDED}") final String hazelcastConfigLocation,
      final ResourceLoader resourceLoader)
      throws IOException {

    final Config hzConfig = getConfig(resourceLoader.getResource(hazelcastConfigLocation));
    return Hazelcast.newHazelcastInstance(hzConfig);
  }
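getConfig(Resource) is not shown; a possible implementation, assuming it simply parses the resolved resource with Hazelcast's XmlConfigBuilder (handling of the NO_CONFIG_PROVIDED default is omitted):

  private Config getConfig(final Resource configResource) throws IOException {
    try (InputStream in = configResource.getInputStream()) {
      // Parse the Hazelcast XML configuration from the injected resource location.
      return new XmlConfigBuilder(in).build();
    }
  }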