@Test public void testLatestAccessCacheMergePolicy() { String cacheName = randomMapName(); Config config = newConfig(); HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config); HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config); TestMemberShipListener memberShipListener = new TestMemberShipListener(1); h2.getCluster().addMembershipListener(memberShipListener); TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1); h2.getLifecycleService().addLifecycleListener(lifeCycleListener); closeConnectionBetween(h1, h2); assertOpenEventually(memberShipListener.latch); assertClusterSizeEventually(1, h1); assertClusterSizeEventually(1, h2); CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1); CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2); CacheManager cacheManager1 = cachingProvider1.getCacheManager(); CacheManager cacheManager2 = cachingProvider2.getCacheManager(); CacheConfig cacheConfig = newCacheConfig(cacheName, LatestAccessCacheMergePolicy.class.getName()); Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig); Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig); // TODO We assume that until here and also while doing get/put, cluster is still splitted. // This assumptions seems fragile due to time sensitivity. 
cache1.put("key1", "value"); assertEquals("value", cache1.get("key1")); // Access to record // Prevent updating at the same time sleepAtLeastMillis(1); cache2.put("key1", "LatestUpdatedValue"); assertEquals("LatestUpdatedValue", cache2.get("key1")); // Access to record cache2.put("key2", "value2"); assertEquals("value2", cache2.get("key2")); // Access to record // Prevent updating at the same time sleepAtLeastMillis(1); cache1.put("key2", "LatestUpdatedValue2"); assertEquals("LatestUpdatedValue2", cache1.get("key2")); // Access to record assertOpenEventually(lifeCycleListener.latch); assertClusterSizeEventually(2, h1); assertClusterSizeEventually(2, h2); Cache cacheTest = cacheManager1.getCache(cacheName); assertEquals("LatestUpdatedValue", cacheTest.get("key1")); assertEquals("LatestUpdatedValue2", cacheTest.get("key2")); }
// Starts a two-member cluster and creates one server-side caching provider per member.
@Before
public void init() {
    factory = new TestHazelcastInstanceFactory(2);
    HazelcastInstance instanceA = factory.newHazelcastInstance();
    HazelcastInstance instanceB = factory.newHazelcastInstance();
    cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(instanceA);
    cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(instanceB);
}
// Issue https://github.com/hazelcast/hazelcast/issues/5865 @Test public void testCompletionTestByPuttingAndRemovingFromDifferentNodes() throws InterruptedException { String cacheName = "simpleCache"; CacheManager cacheManager1 = cachingProvider1.getCacheManager(); CacheManager cacheManager2 = cachingProvider2.getCacheManager(); CacheConfig<Integer, String> config = new CacheConfig<Integer, String>(); final SimpleEntryListener<Integer, String> listener = new SimpleEntryListener<Integer, String>(); MutableCacheEntryListenerConfiguration<Integer, String> listenerConfiguration = new MutableCacheEntryListenerConfiguration<Integer, String>( FactoryBuilder.factoryOf(listener), null, true, true); config.addCacheEntryListenerConfiguration(listenerConfiguration); Cache<Integer, String> cache1 = cacheManager1.createCache(cacheName, config); Cache<Integer, String> cache2 = cacheManager2.getCache(cacheName); assertNotNull(cache1); assertNotNull(cache2); Integer key1 = 1; String value1 = "value1"; cache1.put(key1, value1); assertTrueEventually( new AssertTask() { @Override public void run() throws Exception { assertEquals(1, listener.created.get()); } }); Integer key2 = 2; String value2 = "value2"; cache1.put(key2, value2); assertTrueEventually( new AssertTask() { @Override public void run() throws Exception { assertEquals(2, listener.created.get()); } }); Set<Integer> keys = new HashSet<Integer>(); keys.add(key1); keys.add(key2); cache2.removeAll(keys); assertTrueEventually( new AssertTask() { @Override public void run() throws Exception { assertEquals(2, listener.removed.get()); } }); }
// Verifies that an entry processor can remove records: half the entries are
// removed and the cache size shrinks accordingly.
@Test
public void removeRecordWithEntryProcessor() {
    final int entryCount = 10;
    CachingProvider cachingProvider = HazelcastServerCachingProvider.createCachingProvider(node1);
    CacheManager cacheManager = cachingProvider.getCacheManager();
    CompleteConfiguration<Integer, String> cacheConfig =
            new MutableConfiguration<Integer, String>().setTypes(Integer.class, String.class);
    ICache<Integer, String> cache = cacheManager.createCache("MyCache", cacheConfig).unwrap(ICache.class);

    for (int i = 0; i < entryCount; i++) {
        cache.put(i * 1000, "Value-" + (i * 1000));
    }
    assertEquals(entryCount, cache.size());

    // Remove every even-indexed record through the entry processor.
    for (int i = 0; i < entryCount; i += 2) {
        cache.invoke(i * 1000, new RemoveRecordEntryProcessor());
    }
    assertEquals(entryCount / 2, cache.size());
}
@Test public void testCustomCacheMergePolicy() { String cacheName = randomMapName(); Config config = newConfig(); HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config); HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config); TestMemberShipListener memberShipListener = new TestMemberShipListener(1); h2.getCluster().addMembershipListener(memberShipListener); TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1); h2.getLifecycleService().addLifecycleListener(lifeCycleListener); closeConnectionBetween(h1, h2); assertOpenEventually(memberShipListener.latch); assertClusterSizeEventually(1, h1); assertClusterSizeEventually(1, h2); CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1); CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2); CacheManager cacheManager1 = cachingProvider1.getCacheManager(); CacheManager cacheManager2 = cachingProvider2.getCacheManager(); CacheConfig cacheConfig = newCacheConfig(cacheName, CustomCacheMergePolicy.class.getName()); Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig); Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig); // TODO We assume that until here and also while doing get/put, cluster is still splitted. // This assumptions seems fragile due to time sensitivity. String key = generateKeyOwnedBy(h1); cache1.put(key, "value"); cache2.put(key, Integer.valueOf(1)); assertOpenEventually(lifeCycleListener.latch); assertClusterSizeEventually(2, h1); assertClusterSizeEventually(2, h2); Cache cacheTest = cacheManager2.getCache(cacheName); assertNotNull(cacheTest.get(key)); assertTrue(cacheTest.get(key) instanceof Integer); }
// Obtains the same quorum-protected cache (random name under CACHE_NAME_PREFIX)
// from each of the five members' caching providers, so tests can exercise it
// from both sides of the partitioned cluster.
@Before public void setUp() throws Exception { final String cacheName = CACHE_NAME_PREFIX + randomString(); cache1 = (ICache<Integer, String>) cachingProvider1.getCacheManager().<Integer, String>getCache(cacheName); cache2 = (ICache<Integer, String>) cachingProvider2.getCacheManager().<Integer, String>getCache(cacheName); cache3 = (ICache<Integer, String>) cachingProvider3.getCacheManager().<Integer, String>getCache(cacheName); cache4 = (ICache<Integer, String>) cachingProvider4.getCacheManager().<Integer, String>getCache(cacheName); cache5 = (ICache<Integer, String>) cachingProvider5.getCacheManager().<Integer, String>getCache(cacheName); }
// Closing one CacheManager must not affect caches created by another manager.
@Test
public void testCachesDestroyFromOtherManagers() {
    CacheManager manager1 = cachingProvider1.getCacheManager();
    CacheManager manager2 = cachingProvider2.getCacheManager();
    MutableConfiguration configuration = new MutableConfiguration();
    final Cache cache1 = manager1.createCache("c1", configuration);
    final Cache cache2 = manager2.createCache("c2", configuration);
    cache1.put("key", "value");
    cache2.put("key", "value");

    manager1.close();
    // cache2 must stay usable for the whole observation window.
    assertTrueAllTheTime(new AssertTask() {
        @Override
        public void run() throws Exception {
            cache2.get("key");
        }
    }, 10);
}
@Test
public void testPassThroughCacheMergePolicy() {
    String cacheName = randomMapName();
    Config config = newConfig();
    HazelcastInstance instance1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance instance2 = Hazelcast.newHazelcastInstance(config);

    TestMemberShipListener membershipListener = new TestMemberShipListener(1);
    instance2.getCluster().addMembershipListener(membershipListener);
    TestLifeCycleListener lifecycleListener = new TestLifeCycleListener(1);
    instance2.getLifecycleService().addLifecycleListener(lifecycleListener);

    // Force a split-brain and wait until each half only sees itself.
    closeConnectionBetween(instance1, instance2);
    assertOpenEventually(membershipListener.latch);
    assertClusterSizeEventually(1, instance1);
    assertClusterSizeEventually(1, instance2);

    CachingProvider provider1 = HazelcastServerCachingProvider.createCachingProvider(instance1);
    CachingProvider provider2 = HazelcastServerCachingProvider.createCachingProvider(instance2);
    CacheManager manager1 = provider1.getCacheManager();
    CacheManager manager2 = provider2.getCacheManager();

    CacheConfig cacheConfig = newCacheConfig(cacheName, PassThroughCacheMergePolicy.class.getName());
    Cache cache1 = manager1.createCache(cacheName, cacheConfig);
    Cache cache2 = manager2.createCache(cacheName, cacheConfig);

    // Write different values for the same key on each side of the split.
    String key = generateKeyOwnedBy(instance1);
    cache1.put(key, "value");
    cache2.put(key, "passThroughValue");

    // Heal the split and wait for the merge to complete.
    assertOpenEventually(lifecycleListener.latch);
    assertClusterSizeEventually(2, instance1);
    assertClusterSizeEventually(2, instance2);

    // The merging (joining) side's value is expected to win.
    Cache mergedCache = manager2.getCache(cacheName);
    assertEquals("passThroughValue", mergedCache.get(key));
}
// Builds a five-member cluster partitioned 3+2, with a READ_WRITE quorum of 3
// guarding every cache whose name starts with CACHE_NAME_PREFIX, and creates a
// caching provider for each member.
@BeforeClass
public static void initialize() throws InterruptedException {
    CacheSimpleConfig cacheConfig = new CacheSimpleConfig();
    cacheConfig.setName(CACHE_NAME_PREFIX + "*");
    cacheConfig.setQuorumName(QUORUM_ID);

    QuorumConfig quorumConfig = new QuorumConfig();
    quorumConfig.setName(QUORUM_ID);
    quorumConfig.setType(QuorumType.READ_WRITE);
    quorumConfig.setEnabled(true);
    quorumConfig.setSize(3);

    PartitionedCluster cluster = new PartitionedCluster(new TestHazelcastInstanceFactory())
            .partitionFiveMembersThreeAndTwo(cacheConfig, quorumConfig);
    cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(cluster.h1);
    cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(cluster.h2);
    cachingProvider3 = HazelcastServerCachingProvider.createCachingProvider(cluster.h3);
    cachingProvider4 = HazelcastServerCachingProvider.createCachingProvider(cluster.h4);
    cachingProvider5 = HazelcastServerCachingProvider.createCachingProvider(cluster.h5);
}
// Creates (or re-creates) the typed cache on node1 and runs the given entry
// processor against the given key.
private void executeEntryProcessor(
        Integer key, EntryProcessor<Integer, String, Void> entryProcessor, String cacheName) {
    CachingProvider provider = HazelcastServerCachingProvider.createCachingProvider(node1);
    CacheManager manager = provider.getCacheManager();
    CompleteConfiguration<Integer, String> configuration =
            new MutableConfiguration<Integer, String>().setTypes(Integer.class, String.class);
    Cache<Integer, String> cache = manager.createCache(cacheName, configuration);
    cache.invoke(key, entryProcessor);
}
@Test public void testCachesDestroy() { CacheManager cacheManager = cachingProvider1.getCacheManager(); CacheManager cacheManager2 = cachingProvider2.getCacheManager(); MutableConfiguration configuration = new MutableConfiguration(); final Cache c1 = cacheManager.createCache("c1", configuration); final Cache c2 = cacheManager2.getCache("c1"); c1.put("key", "value"); cacheManager.destroyCache("c1"); assertTrueEventually( new AssertTask() { @Override public void run() throws Exception { try { c2.get("key"); throw new AssertionError("get should throw IllegalStateException"); } catch (IllegalStateException e) { // ignored as expected } } }); }
// Serializes and deserializes a CacheReplicationOperation for every partition
// to make sure the replication payload round-trips cleanly.
@Test
public void test_CacheReplicationOperation_serialization() throws Exception {
    TestHazelcastInstanceFactory factory = new TestHazelcastInstanceFactory(1);
    HazelcastInstance hazelcastInstance = factory.newHazelcastInstance();
    try {
        CachingProvider provider = HazelcastServerCachingProvider.createCachingProvider(hazelcastInstance);
        CacheManager manager = provider.getCacheManager();

        CompleteConfiguration configuration = new MutableConfiguration();
        Cache cache1 = manager.createCache("cache1", configuration);
        Cache cache2 = manager.createCache("cache2", configuration);
        Cache cache3 = manager.createCache("cache3", configuration);
        for (int i = 0; i < 1000; i++) {
            cache1.put("key" + i, i);
            cache2.put("key" + i, i);
            cache3.put("key" + i, i);
        }

        // Reach through the proxy via reflection to get the node engine
        // (there is no public accessor for it).
        HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) hazelcastInstance;
        Field original = HazelcastInstanceProxy.class.getDeclaredField("original");
        original.setAccessible(true);
        HazelcastInstanceImpl impl = (HazelcastInstanceImpl) original.get(proxy);
        NodeEngineImpl nodeEngine = impl.node.nodeEngine;
        CacheService cacheService = nodeEngine.getService(CacheService.SERVICE_NAME);

        // NOTE(review): `service` is a field declared outside this chunk —
        // presumably a serialization service; confirm against the full class.
        int partitionCount = nodeEngine.getPartitionService().getPartitionCount();
        for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
            CachePartitionSegment segment = cacheService.getSegment(partitionId);
            CacheReplicationOperation operation = new CacheReplicationOperation(segment, 1);
            Data serialized = service.toData(operation);
            try {
                service.toObject(serialized);
            } catch (Exception e) {
                // Re-throw with the failing partition id for easier diagnosis.
                throw new Exception("Partition: " + partitionId, e);
            }
        }
    } finally {
        factory.shutdownAll();
    }
}
@Test
public void testJSRExample1() throws InterruptedException {
    final String cacheName = randomString();

    CacheManager cacheManager = cachingProvider1.getCacheManager();
    assertNotNull(cacheManager);
    assertNull(cacheManager.getCache(cacheName));

    CacheConfig<Integer, String> config = new CacheConfig<Integer, String>();
    Cache<Integer, String> cache = cacheManager.createCache(cacheName, config);
    assertNotNull(cache);

    // The new cache must become visible through the second member's manager.
    assertTrueEventually(new AssertTask() {
        @Override
        public void run() throws Exception {
            CacheManager cm2 = cachingProvider2.getCacheManager();
            assertNotNull(cm2.getCache(cacheName));
        }
    });

    Integer key = 1;
    String value1 = "value";
    cache.put(key, value1);
    assertEquals(value1, cache.get(key));
    cache.remove(key);
    assertNull(cache.get(key));

    // The same round-trip must work on a cache handle re-fetched by name.
    Cache<Integer, String> cache2 = cacheManager.getCache(cacheName);
    assertNotNull(cache2);
    key = 1;
    value1 = "value";
    cache.put(key, value1);
    assertEquals(value1, cache.get(key));
    cache.remove(key);
    assertNull(cache.get(key));

    cacheManager.destroyCache(cacheName);
    cacheManager.close();
}
// Releases the JCache providers before shutting down the Hazelcast instances
// they were created from.
@After public void tear() { cachingProvider1.close(); cachingProvider2.close(); factory.shutdownAll(); }