  @Test
  public void testLatestAccessCacheMergePolicy() {
    String cacheName = randomMapName();
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);

    TestMemberShipListener memberShipListener = new TestMemberShipListener(1);
    h2.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h2.getLifecycleService().addLifecycleListener(lifeCycleListener);

    closeConnectionBetween(h1, h2);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(1, h1);
    assertClusterSizeEventually(1, h2);

    CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1);
    CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2);

    CacheManager cacheManager1 = cachingProvider1.getCacheManager();
    CacheManager cacheManager2 = cachingProvider2.getCacheManager();

    CacheConfig cacheConfig =
        newCacheConfig(cacheName, LatestAccessCacheMergePolicy.class.getName());

    Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig);
    Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig);

    // TODO We assume the cluster is still split at this point and during the get/put calls below.
    // This assumption seems fragile due to its time sensitivity.

    cache1.put("key1", "value");
    assertEquals("value", cache1.get("key1")); // Access to record

    // Prevent updating at the same time
    sleepAtLeastMillis(1);

    cache2.put("key1", "LatestUpdatedValue");
    assertEquals("LatestUpdatedValue", cache2.get("key1")); // Access to record

    cache2.put("key2", "value2");
    assertEquals("value2", cache2.get("key2")); // Access to record

    // Prevent updating at the same time
    sleepAtLeastMillis(1);

    cache1.put("key2", "LatestUpdatedValue2");
    assertEquals("LatestUpdatedValue2", cache1.get("key2")); // Access to record

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);

    Cache cacheTest = cacheManager1.getCache(cacheName);
    assertEquals("LatestUpdatedValue", cacheTest.get("key1"));
    assertEquals("LatestUpdatedValue2", cacheTest.get("key2"));
  }
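  // TestLifeCycleListener and TestMemberShipListener are not shown in this excerpt.
  // A minimal sketch, assuming each one just opens a CountDownLatch: the lifecycle
  // listener when the cluster merges back, the membership listener when a member
  // drops out of the cluster.
  private static class TestLifeCycleListener implements LifecycleListener {

    final CountDownLatch latch;

    TestLifeCycleListener(int countdown) {
      latch = new CountDownLatch(countdown);
    }

    @Override
    public void stateChanged(LifecycleEvent event) {
      if (event.getState() == LifecycleEvent.LifecycleState.MERGED) {
        latch.countDown();
      }
    }
  }

  private static class TestMemberShipListener extends MembershipAdapter {

    final CountDownLatch latch;

    TestMemberShipListener(int countdown) {
      latch = new CountDownLatch(countdown);
    }

    @Override
    public void memberRemoved(MembershipEvent event) {
      latch.countDown();
    }
  }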
  @Before
  public void init() {
    factory = new TestHazelcastInstanceFactory(2);
    HazelcastInstance hz1 = factory.newHazelcastInstance();
    HazelcastInstance hz2 = factory.newHazelcastInstance();
    cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(hz1);
    cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(hz2);
  }
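  // Fields assumed by init() above; their declarations are not part of this excerpt.
  private TestHazelcastInstanceFactory factory;
  private CachingProvider cachingProvider1;
  private CachingProvider cachingProvider2;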
  @Test
  public void removeRecordWithEntryProcessor() {
    final int ENTRY_COUNT = 10;

    CachingProvider cachingProvider = HazelcastServerCachingProvider.createCachingProvider(node1);
    CacheManager cacheManager = cachingProvider.getCacheManager();
    CompleteConfiguration<Integer, String> cacheConfig =
        new MutableConfiguration<Integer, String>().setTypes(Integer.class, String.class);
    ICache<Integer, String> cache =
        cacheManager.createCache("MyCache", cacheConfig).unwrap(ICache.class);

    for (int i = 0; i < ENTRY_COUNT; i++) {
      cache.put(i * 1000, "Value-" + (i * 1000));
    }

    assertEquals(ENTRY_COUNT, cache.size());

    for (int i = 0; i < ENTRY_COUNT; i++) {
      if (i % 2 == 0) {
        cache.invoke(i * 1000, new RemoveRecordEntryProcessor());
      }
    }

    assertEquals(ENTRY_COUNT / 2, cache.size());
  }
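  // RemoveRecordEntryProcessor is not shown in this excerpt. A minimal sketch that
  // matches the test's expectation (every invoked key is removed, halving the size):
  public static class RemoveRecordEntryProcessor
      implements EntryProcessor<Integer, String, Object>, Serializable {

    @Override
    public Object process(MutableEntry<Integer, String> entry, Object... arguments) {
      // removing through the JCache MutableEntry API deletes the backing record
      entry.remove();
      return null;
    }
  }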
  @Test
  public void testCustomCacheMergePolicy() {
    String cacheName = randomMapName();
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);

    TestMemberShipListener memberShipListener = new TestMemberShipListener(1);
    h2.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h2.getLifecycleService().addLifecycleListener(lifeCycleListener);

    closeConnectionBetween(h1, h2);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(1, h1);
    assertClusterSizeEventually(1, h2);

    CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1);
    CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2);

    CacheManager cacheManager1 = cachingProvider1.getCacheManager();
    CacheManager cacheManager2 = cachingProvider2.getCacheManager();

    CacheConfig cacheConfig = newCacheConfig(cacheName, CustomCacheMergePolicy.class.getName());

    Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig);
    Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig);

    // TODO We assume the cluster is still split at this point and during the get/put calls below.
    // This assumption seems fragile due to its time sensitivity.

    String key = generateKeyOwnedBy(h1);
    cache1.put(key, "value");

    cache2.put(key, Integer.valueOf(1));

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);

    Cache cacheTest = cacheManager2.getCache(cacheName);
    assertNotNull(cacheTest.get(key));
    assertTrue(cacheTest.get(key) instanceof Integer);
  }
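  // CustomCacheMergePolicy is not shown in this excerpt. The assertions only pass if
  // the Integer written on the h2 side survives the merge, so a plausible sketch is
  // a policy that accepts the merging value only when it is an Integer:
  private static class CustomCacheMergePolicy implements CacheMergePolicy {

    @Override
    public Object merge(String cacheName, CacheEntryView mergingEntry, CacheEntryView existingEntry) {
      if (mergingEntry.getValue() instanceof Integer) {
        return mergingEntry.getValue();
      }
      return null;
    }
  }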
  @Test
  public void testPassThroughCacheMergePolicy() {
    String cacheName = randomMapName();
    Config config = newConfig();
    HazelcastInstance h1 = Hazelcast.newHazelcastInstance(config);
    HazelcastInstance h2 = Hazelcast.newHazelcastInstance(config);

    TestMemberShipListener memberShipListener = new TestMemberShipListener(1);
    h2.getCluster().addMembershipListener(memberShipListener);
    TestLifeCycleListener lifeCycleListener = new TestLifeCycleListener(1);
    h2.getLifecycleService().addLifecycleListener(lifeCycleListener);

    closeConnectionBetween(h1, h2);

    assertOpenEventually(memberShipListener.latch);
    assertClusterSizeEventually(1, h1);
    assertClusterSizeEventually(1, h2);

    CachingProvider cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(h1);
    CachingProvider cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(h2);

    CacheManager cacheManager1 = cachingProvider1.getCacheManager();
    CacheManager cacheManager2 = cachingProvider2.getCacheManager();

    CacheConfig cacheConfig =
        newCacheConfig(cacheName, PassThroughCacheMergePolicy.class.getName());

    Cache cache1 = cacheManager1.createCache(cacheName, cacheConfig);
    Cache cache2 = cacheManager2.createCache(cacheName, cacheConfig);

    String key = generateKeyOwnedBy(h1);
    cache1.put(key, "value");

    cache2.put(key, "passThroughValue");

    assertOpenEventually(lifeCycleListener.latch);
    assertClusterSizeEventually(2, h1);
    assertClusterSizeEventually(2, h2);

    Cache cacheTest = cacheManager2.getCache(cacheName);
    assertEquals("passThroughValue", cacheTest.get(key));
  }
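  // newConfig() and newCacheConfig() are not shown in this excerpt. A sketch of what
  // the split-brain tests rely on: short merge delays so the healed cluster merges
  // quickly, and a cache config carrying the merge policy under test. The property
  // values below are illustrative.
  private Config newConfig() {
    Config config = new Config();
    config.setProperty("hazelcast.merge.first.run.delay.seconds", "5");
    config.setProperty("hazelcast.merge.next.run.delay.seconds", "3");
    return config;
  }

  private CacheConfig newCacheConfig(String cacheName, String mergePolicy) {
    CacheConfig cacheConfig = new CacheConfig();
    cacheConfig.setName(cacheName);
    cacheConfig.setMergePolicy(mergePolicy);
    return cacheConfig;
  }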
  @BeforeClass
  public static void initialize() throws InterruptedException {
    CacheSimpleConfig cacheConfig = new CacheSimpleConfig();
    cacheConfig.setName(CACHE_NAME_PREFIX + "*");
    cacheConfig.setQuorumName(QUORUM_ID);

    QuorumConfig quorumConfig = new QuorumConfig();
    quorumConfig.setName(QUORUM_ID);
    quorumConfig.setType(QuorumType.READ_WRITE);
    quorumConfig.setEnabled(true);
    quorumConfig.setSize(3);

    PartitionedCluster cluster =
        new PartitionedCluster(new TestHazelcastInstanceFactory())
            .partitionFiveMembersThreeAndTwo(cacheConfig, quorumConfig);
    cachingProvider1 = HazelcastServerCachingProvider.createCachingProvider(cluster.h1);
    cachingProvider2 = HazelcastServerCachingProvider.createCachingProvider(cluster.h2);
    cachingProvider3 = HazelcastServerCachingProvider.createCachingProvider(cluster.h3);
    cachingProvider4 = HazelcastServerCachingProvider.createCachingProvider(cluster.h4);
    cachingProvider5 = HazelcastServerCachingProvider.createCachingProvider(cluster.h5);
  }
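  // Static fixtures assumed by initialize() above; the constant values here are
  // illustrative, not taken from the original class.
  private static final String CACHE_NAME_PREFIX = "quorumCache";
  private static final String QUORUM_ID = "threeNodeQuorumRule";

  private static HazelcastServerCachingProvider cachingProvider1;
  private static HazelcastServerCachingProvider cachingProvider2;
  private static HazelcastServerCachingProvider cachingProvider3;
  private static HazelcastServerCachingProvider cachingProvider4;
  private static HazelcastServerCachingProvider cachingProvider5;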
  private void executeEntryProcessor(
      Integer key, EntryProcessor<Integer, String, Void> entryProcessor, String cacheName) {
    CachingProvider cachingProvider = HazelcastServerCachingProvider.createCachingProvider(node1);
    CacheManager cacheManager = cachingProvider.getCacheManager();

    CompleteConfiguration<Integer, String> config =
        new MutableConfiguration<Integer, String>().setTypes(Integer.class, String.class);

    Cache<Integer, String> cache = cacheManager.createCache(cacheName, config);

    cache.invoke(key, entryProcessor);
  }
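  // A hypothetical processor that fits executeEntryProcessor()'s signature; the class
  // name and behavior are illustrative, not taken from the original test.
  public static class SetValueEntryProcessor
      implements EntryProcessor<Integer, String, Void>, Serializable {

    private final String value;

    public SetValueEntryProcessor(String value) {
      this.value = value;
    }

    @Override
    public Void process(MutableEntry<Integer, String> entry, Object... arguments) {
      entry.setValue(value);
      return null;
    }
  }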
  @Test
  public void test_CacheReplicationOperation_serialization() throws Exception {
    TestHazelcastInstanceFactory factory = new TestHazelcastInstanceFactory(1);
    HazelcastInstance hazelcastInstance = factory.newHazelcastInstance();

    try {
      CachingProvider provider =
          HazelcastServerCachingProvider.createCachingProvider(hazelcastInstance);
      CacheManager manager = provider.getCacheManager();

      CompleteConfiguration configuration = new MutableConfiguration();
      Cache cache1 = manager.createCache("cache1", configuration);
      Cache cache2 = manager.createCache("cache2", configuration);
      Cache cache3 = manager.createCache("cache3", configuration);

      for (int i = 0; i < 1000; i++) {
        cache1.put("key" + i, i);
        cache2.put("key" + i, i);
        cache3.put("key" + i, i);
      }

      HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) hazelcastInstance;

      Field original = HazelcastInstanceProxy.class.getDeclaredField("original");
      original.setAccessible(true);

      HazelcastInstanceImpl impl = (HazelcastInstanceImpl) original.get(proxy);
      NodeEngineImpl nodeEngine = impl.node.nodeEngine;
      CacheService cacheService = nodeEngine.getService(CacheService.SERVICE_NAME);

      SerializationService serializationService = nodeEngine.getSerializationService();
      int partitionCount = nodeEngine.getPartitionService().getPartitionCount();

      for (int partitionId = 0; partitionId < partitionCount; partitionId++) {
        CachePartitionSegment segment = cacheService.getSegment(partitionId);

        CacheReplicationOperation operation = new CacheReplicationOperation(segment, 1);
        Data serialized = serializationService.toData(operation);
        try {
          serializationService.toObject(serialized);
        } catch (Exception e) {
          throw new Exception("Partition: " + partitionId, e);
        }
      }

    } finally {
      factory.shutdownAll();
    }
  }