Example #1
 CacheEventHandler(NodeEngine nodeEngine) {
   this.nodeEngine = nodeEngine;
   GroupProperties groupProperties = nodeEngine.getGroupProperties();
   invalidationMessageBatchEnabled =
       groupProperties.getBoolean(CACHE_INVALIDATION_MESSAGE_BATCH_ENABLED);
   // Batch mode: schedule a periodic sender instead of per-event invalidation messages.
   if (invalidationMessageBatchEnabled) {
     invalidationMessageBatchSize =
         groupProperties.getInteger(CACHE_INVALIDATION_MESSAGE_BATCH_SIZE);
     int invalidationMessageBatchFreq =
         groupProperties.getInteger(CACHE_INVALIDATION_MESSAGE_BATCH_FREQUENCY_SECONDS);
     ExecutionService executionService = nodeEngine.getExecutionService();
     CacheBatchInvalidationMessageSender batchInvalidationMessageSender =
         new CacheBatchInvalidationMessageSender();
     cacheBatchInvalidationMessageSenderScheduler =
         executionService.scheduleWithRepetition(
             ICacheService.SERVICE_NAME + ":cacheBatchInvalidationMessageSender",
             batchInvalidationMessageSender,
             invalidationMessageBatchFreq,
             invalidationMessageBatchFreq,
             TimeUnit.SECONDS);
   }
   LifecycleService lifecycleService = nodeEngine.getHazelcastInstance().getLifecycleService();
   lifecycleService.addLifecycleListener(
       new LifecycleListener() {
         @Override
         public void stateChanged(LifecycleEvent event) {
           if (event.getState() == LifecycleEvent.LifecycleState.SHUTTING_DOWN) {
             invalidateAllCaches();
           }
         }
       });
 }
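Example #1 wires a periodic, named task into the ExecutionService at construction time and invalidates all caches on shutdown. Distilled to just the scheduling call, a minimal sketch; the task name, period, and body are placeholders rather than values from the excerpt:

  ExecutionService executionService = nodeEngine.getExecutionService();
  // Repeats the task at a fixed period; the returned handle can be kept
  // (as the field assignment above does) to cancel the task later.
  ScheduledFuture<?> handle = executionService.scheduleWithRepetition(
      "my-service:periodicTask",        // placeholder task name
      new Runnable() {
        public void run() {
          // e.g. flush batched invalidation events
        }
      },
      10,                               // initial delay (placeholder)
      10,                               // period (placeholder)
      TimeUnit.SECONDS);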
Example #2
  @Test
  public void testPostregisteredExecutionCallbackCompletableFuture() throws Exception {
    HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) createHazelcastInstance();
    Field originalField = HazelcastInstanceProxy.class.getDeclaredField("original");
    originalField.setAccessible(true);
    HazelcastInstanceImpl hz = (HazelcastInstanceImpl) originalField.get(proxy);
    NodeEngine nodeEngine = hz.node.nodeEngine;
    ExecutionService es = nodeEngine.getExecutionService();

    final CountDownLatch latch1 = new CountDownLatch(1);
    final CountDownLatch latch2 = new CountDownLatch(1);
    final ExecutorService executorService = Executors.newSingleThreadExecutor();
    try {
      Future<String> future =
          executorService.submit(
              new Callable<String>() {
                @Override
                public String call() {
                  try {
                    return "success";
                  } finally {
                    latch1.countDown();
                  }
                }
              });

      final ICompletableFuture<String> completableFuture = es.asCompletableFuture(future);
      latch1.await(30, TimeUnit.SECONDS);

      final AtomicReference<Object> reference = new AtomicReference<Object>();
      completableFuture.andThen(
          new ExecutionCallback<String>() {
            @Override
            public void onResponse(String response) {
              reference.set(response);
              latch2.countDown();
            }

            @Override
            public void onFailure(Throwable t) {
              reference.set(t);
              latch2.countDown();
            }
          });

      latch2.await(30, TimeUnit.SECONDS);
      if (reference.get() instanceof Throwable) {
        ((Throwable) reference.get()).printStackTrace();
      }

      assertEquals("success", reference.get());

    } finally {
      executorService.shutdown();
    }
  }
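The point of Example #2 is registration order: the plain Future has already completed (latch1 guarantees it) before asCompletableFuture/andThen attaches the callback, so the adapter must replay the stored result. Schematically, with the same names as the test:

  // The wrapped future is already done; the callback must still fire once.
  ICompletableFuture<String> completableFuture = es.asCompletableFuture(future);
  completableFuture.andThen(new ExecutionCallback<String>() {
    public void onResponse(String response) {
      // receives "success" even though completion happened earlier
    }
    public void onFailure(Throwable t) {
      // would receive the failure cause instead
    }
  });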
Example #3
 @Before
 public void setUp() throws Exception {
   NodeEngine nodeEngine = getNode(createHazelcastInstance()).getNodeEngine();
   executionService = nodeEngine.getExecutionService();
   startLogicLatch = new CountDownLatch(1);
   executedLogic = new CountDownLatch(1);
   inExecutionLatch = new CountDownLatch(1);
   reference1 = new AtomicReference<Object>();
   reference2 = new AtomicReference<Object>();
 }
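Example #3 only prepares latches; the tests built on it presumably submit a task that signals each stage. A hypothetical test body using this fixture (the task logic is an assumption, not shown in the excerpt):

  executionService.submit("default", new Callable<Object>() {
    public Object call() throws Exception {
      inExecutionLatch.countDown();                 // task has started
      startLogicLatch.await(30, TimeUnit.SECONDS);  // wait for the test's go-ahead
      executedLogic.countDown();                    // task logic completed
      return null;
    }
  });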
Example #4
  @Test
  public void testManagedPreregisteredExecutionCallbackCompletableFuture() throws Exception {
    HazelcastInstanceProxy proxy = (HazelcastInstanceProxy) createHazelcastInstance();
    Field originalField = HazelcastInstanceProxy.class.getDeclaredField("original");
    originalField.setAccessible(true);
    HazelcastInstanceImpl hz = (HazelcastInstanceImpl) originalField.get(proxy);
    NodeEngine nodeEngine = hz.node.nodeEngine;
    ExecutionService es = nodeEngine.getExecutionService();

    final CountDownLatch latch1 = new CountDownLatch(1);
    final CountDownLatch latch2 = new CountDownLatch(1);
    Future<String> future =
        es.submit(
            "default",
            new Callable<String>() {
              @Override
              public String call() {
                try {
                  latch1.await(30, TimeUnit.SECONDS);
                  return "success";
                } catch (Exception e) {
                  throw new RuntimeException(e);
                }
              }
            });

    final AtomicReference<Object> reference = new AtomicReference<Object>();
    final ICompletableFuture<String> completableFuture = es.asCompletableFuture(future);
    completableFuture.andThen(
        new ExecutionCallback<String>() {
          @Override
          public void onResponse(String response) {
            reference.set(response);
            latch2.countDown();
          }

          @Override
          public void onFailure(Throwable t) {
            reference.set(t);
            latch2.countDown();
          }
        });

    latch1.countDown();
    latch2.await(30, TimeUnit.SECONDS);
    assertEquals("success", reference.get());
  }
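Example #4 is the mirror image of Example #2: here andThen runs while the task is still parked on latch1, so the callback is registered before completion and fires only once latch1 is released. Schematically:

  completableFuture.andThen(callback);  // attach first; the task still blocks on latch1
  latch1.countDown();                   // let the task finish; the callback now fires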
Example #5
  @Override
  public void removeEndpoint(final ClientEndpoint ce, boolean closeImmediately) {
    checkNotNull(ce, "endpoint can't be null");

    ClientEndpointImpl endpoint = (ClientEndpointImpl) ce;

    endpoints.remove(endpoint.getConnection());
    logger.info("Destroying " + endpoint);
    try {
      endpoint.destroy();
    } catch (LoginException e) {
      logger.warning(e);
    }

    final Connection connection = endpoint.getConnection();
    if (closeImmediately) {
      try {
        connection.close();
      } catch (Throwable e) {
        logger.warning("While closing client connection: " + connection, e);
      }
    } else {
      nodeEngine
          .getExecutionService()
          .schedule(
              new Runnable() {
                public void run() {
                  if (connection.isAlive()) {
                    try {
                      connection.close();
                    } catch (Throwable e) {
                      logger.warning("While closing client connection: " + e.toString());
                    }
                  }
                }
              },
              DESTROY_ENDPOINT_DELAY_MS,
              TimeUnit.MILLISECONDS);
    }
    ClientEvent event =
        new ClientEvent(
            endpoint.getUuid(),
            ClientEventType.DISCONNECTED,
            endpoint.getSocketAddress(),
            endpoint.getClientType());
    clientEngine.sendClientEvent(event);
  }
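The deferred branch of Example #5 is the notable part: rather than closing the client connection inline, a one-shot cleanup task is scheduled with a delay. A minimal sketch of that call; the delay value is a placeholder, since DESTROY_ENDPOINT_DELAY_MS is not shown in the excerpt:

  nodeEngine.getExecutionService().schedule(
      new Runnable() {
        public void run() {
          if (connection.isAlive()) {   // skip if it was closed in the meantime
            try {
              connection.close();
            } catch (Throwable t) {
              // log and continue, as the example does
            }
          }
        }
      },
      1000,                             // placeholder delay in milliseconds
      TimeUnit.MILLISECONDS);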
Example #6
  protected void submitLoadAllTask(
      final OperationFactory operationFactory, final CompletionListener completionListener) {
    final LoadAllTask loadAllTask = new LoadAllTask(operationFactory, completionListener);
    final ExecutionService executionService = nodeEngine.getExecutionService();
    final CompletableFutureTask<?> future =
        (CompletableFutureTask<?>)
            executionService.submit("loadAll-" + delegate.getName(), loadAllTask);
    loadAllTasks.add(future);
    future.andThen(
        new ExecutionCallback() {
          @Override
          public void onResponse(Object response) {
            loadAllTasks.remove(future);
          }

          @Override
          public void onFailure(Throwable t) {
            loadAllTasks.remove(future);
          }
        });
  }
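Example #6 shows bookkeeping around an asynchronous task: the future is added to loadAllTasks on submission and removed in both callback branches, so the callback acts as a finally for the tracked entry. One plausible reason to keep such a set is a shutdown path like the following hypothetical sketch (assuming loadAllTasks is a concurrent collection of futures):

  // Hypothetical cleanup: cancel any load tasks still in flight.
  for (Future<?> task : loadAllTasks) {
    task.cancel(true);
  }
  loadAllTasks.clear();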
Example #7
  @Override
  public void init(NodeEngine nodeEngine, Properties properties) {
    long mergeFirstRunDelayMs =
        node.groupProperties.getMillis(GroupProperty.MERGE_FIRST_RUN_DELAY_SECONDS);
    mergeFirstRunDelayMs =
        (mergeFirstRunDelayMs > 0 ? mergeFirstRunDelayMs : DEFAULT_MERGE_RUN_DELAY_MILLIS);

    ExecutionService executionService = nodeEngine.getExecutionService();
    executionService.register(
        EXECUTOR_NAME, 2, CLUSTER_EXECUTOR_QUEUE_CAPACITY, ExecutorType.CACHED);

    long mergeNextRunDelayMs =
        node.groupProperties.getMillis(GroupProperty.MERGE_NEXT_RUN_DELAY_SECONDS);
    mergeNextRunDelayMs =
        (mergeNextRunDelayMs > 0 ? mergeNextRunDelayMs : DEFAULT_MERGE_RUN_DELAY_MILLIS);
    executionService.scheduleWithFixedDelay(
        EXECUTOR_NAME,
        new SplitBrainHandler(node),
        mergeFirstRunDelayMs,
        mergeNextRunDelayMs,
        TimeUnit.MILLISECONDS);

    clusterHeartbeatManager.init();
  }
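Example #7 combines two ExecutionService features: register creates a named executor with a fixed pool and bounded queue up front, and scheduleWithFixedDelay then runs the split-brain handler on it, spacing runs by the delay between the end of one run and the start of the next. A minimal sketch of the same sequence with placeholder names and capacities:

  ExecutionService executionService = nodeEngine.getExecutionService();
  // Named executor: 2 threads, bounded queue (placeholder capacity).
  executionService.register("hz:cluster", 2, 1000, ExecutorType.CACHED);
  executionService.scheduleWithFixedDelay(
      "hz:cluster",
      new Runnable() {
        public void run() {
          // periodic cluster maintenance (placeholder body)
        }
      },
      30000,                            // first-run delay in ms (placeholder)
      30000,                            // delay between runs in ms (placeholder)
      TimeUnit.MILLISECONDS);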
Example #8
  void removeEndpoint(final ClientEndpoint endpoint, boolean closeImmediately) {
    endpoints.remove(endpoint.getConnection());
    LOGGER.info("Destroying " + endpoint);
    try {
      endpoint.destroy();
    } catch (LoginException e) {
      LOGGER.warning(e);
    }

    final Connection connection = endpoint.getConnection();
    if (closeImmediately) {
      try {
        connection.close();
      } catch (Throwable e) {
        LOGGER.warning("While closing client connection: " + connection, e);
      }
    } else {
      nodeEngine
          .getExecutionService()
          .schedule(
              new Runnable() {
                public void run() {
                  if (connection.live()) {
                    try {
                      connection.close();
                    } catch (Throwable e) {
                      LOGGER.warning("While closing client connection: " + e.toString());
                    }
                  }
                }
              },
              DESTROY_ENDPOINT_DELAY_MS,
              TimeUnit.MILLISECONDS);
    }
    clientEngine.sendClientEvent(endpoint);
  }
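Example #8 appears to be an older variant of Example #5: the flow is identical apart from connection.live() versus connection.isAlive() and the argument handed to clientEngine.sendClientEvent, which suggests the two excerpts come from different versions of the same class.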
Example #9
  public void run() {
    final NodeEngine nodeEngine = getNodeEngine();
    final Address masterAddress = nodeEngine.getMasterAddress();
    if (!masterAddress.equals(migrationInfo.getMaster())) {
      throw new RetryableHazelcastException(
          "Migration initiator is not master node! => " + toString());
    }
    if (!masterAddress.equals(getCallerAddress())) {
      throw new RetryableHazelcastException("Caller is not master node! => " + toString());
    }

    // Resolve and validate the migration endpoints before doing any work.
    final Address source = migrationInfo.getSource();
    final Address destination = migrationInfo.getDestination();
    final Member target = nodeEngine.getClusterService().getMember(destination);
    if (target == null) {
      throw new RetryableHazelcastException(
          "Destination of migration could not be found! => " + toString());
    }
    if (destination.equals(source)) {
      getLogger().warning("Source and destination addresses are the same! => " + toString());
      success = false;
      return;
    }

    if (source == null || !source.equals(nodeEngine.getThisAddress())) {
      throw new RetryableHazelcastException(
          "Source of migration is not this node! => " + toString());
    }
    // Proceed only if the migration has not been cancelled in the meantime.
    if (migrationInfo.startProcessing()) {
      try {
        PartitionServiceImpl partitionService = getService();
        PartitionImpl partition = partitionService.getPartition(migrationInfo.getPartitionId());
        final Address owner = partition.getOwner();
        if (!source.equals(owner)) {
          throw new HazelcastException(
              "Cannot migrate! This node is not owner of the partition => "
                  + migrationInfo
                  + " -> "
                  + partition);
        }
        partitionService.addActiveMigration(migrationInfo);
        final long[] replicaVersions =
            partitionService.getPartitionReplicaVersions(migrationInfo.getPartitionId());
        final long timeout = nodeEngine.getGroupProperties().PARTITION_MIGRATION_TIMEOUT.getLong();
        final Collection<Operation> tasks = prepareMigrationTasks();
        if (tasks.size() > 0) {
          returnResponse = false;
          final ResponseHandler responseHandler = getResponseHandler();
          final SerializationService serializationService = nodeEngine.getSerializationService();

          // Serialize and send the partition data from the async executor so the
          // operation thread is released; the response is sent manually later.
          nodeEngine
              .getExecutionService()
              .getExecutor(ExecutionService.ASYNC_EXECUTOR)
              .execute(
                  new Runnable() {
                    public void run() {
                      final BufferObjectDataOutput out =
                          serializationService.createObjectDataOutput(1024 * 32);
                      try {
                        out.writeInt(tasks.size());
                        for (Operation task : tasks) {
                          serializationService.writeObject(out, task);
                        }
                        final byte[] data;
                        boolean compress =
                            nodeEngine
                                .getGroupProperties()
                                .PARTITION_MIGRATION_ZIP_ENABLED
                                .getBoolean();
                        if (compress) {
                          data = IOUtil.compress(out.toByteArray());
                        } else {
                          data = out.toByteArray();
                        }
                        final MigrationOperation migrationOperation =
                            new MigrationOperation(
                                migrationInfo, replicaVersions, data, tasks.size(), compress);
                        Invocation inv =
                            nodeEngine
                                .getOperationService()
                                .createInvocationBuilder(
                                    PartitionServiceImpl.SERVICE_NAME,
                                    migrationOperation,
                                    destination)
                                .setTryPauseMillis(1000)
                                .setReplicaIndex(getReplicaIndex())
                                .build();
                        Future future = inv.invoke();
                        Boolean result =
                            (Boolean) nodeEngine.toObject(future.get(timeout, TimeUnit.SECONDS));
                        responseHandler.sendResponse(result);
                      } catch (Throwable e) {
                        responseHandler.sendResponse(Boolean.FALSE);
                        if (e instanceof ExecutionException) {
                          e = e.getCause() != null ? e.getCause() : e;
                        }
                        Level level =
                            (e instanceof MemberLeftException || e instanceof InterruptedException)
                                    || !getNodeEngine().isActive()
                                ? Level.INFO
                                : Level.WARNING;
                        getLogger().log(level, e.getMessage(), e);
                      } finally {
                        IOUtil.closeResource(out);
                      }
                    }
                  });
        } else {
          success = true;
        }
      } catch (Throwable e) {
        getLogger().warning(e);
        success = false;
      } finally {
        migrationInfo.doneProcessing();
      }
    } else {
      getLogger().warning("Migration is cancelled -> " + migrationInfo);
      success = false;
    }
  }
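The key move in Example #9 is returnResponse = false followed by a manual responseHandler.sendResponse(...) from inside the task: the operation returns immediately, while the serialization and the blocking future.get(timeout, ...) run on the shared ASYNC_EXECUTOR. Stripped to the hand-off itself (doBlockingWork is a hypothetical stand-in for the serialize-invoke-wait sequence):

  nodeEngine
      .getExecutionService()
      .getExecutor(ExecutionService.ASYNC_EXECUTOR)
      .execute(new Runnable() {
        public void run() {
          try {
            Object result = doBlockingWork();       // hypothetical blocking call
            responseHandler.sendResponse(result);   // reply from the async thread
          } catch (Throwable t) {
            responseHandler.sendResponse(Boolean.FALSE);
          }
        }
      });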
Example #10
 public DefaultRecordStore(String name, MapService mapService, int partitionId) {
   this.name = name;
   this.partitionId = partitionId;
   this.mapService = mapService;
   this.mapContainer = mapService.getMapContainer(name);
   this.logger = mapService.getNodeEngine().getLogger(this.getName());
   recordFactory = mapContainer.getRecordFactory();
   NodeEngine nodeEngine = mapService.getNodeEngine();
   final LockService lockService = nodeEngine.getSharedService(LockService.SERVICE_NAME);
   this.lockStore =
       lockService == null
           ? null
           : lockService.createLockStore(
               partitionId, new DefaultObjectNamespace(MapService.SERVICE_NAME, name));
   this.sizeEstimator = SizeEstimators.createMapSizeEstimator();
   final int mapLoadChunkSize = nodeEngine.getGroupProperties().MAP_LOAD_CHUNK_SIZE.getInteger();
   final Queue<Map> chunks = new LinkedList<Map>();
    // Only the partition owner triggers the initial MapStore key load.
    if (nodeEngine
       .getThisAddress()
       .equals(nodeEngine.getPartitionService().getPartitionOwner(partitionId))) {
     if (mapContainer.getStore() != null && !loaded.get()) {
       Map<Data, Object> loadedKeys = mapContainer.getInitialKeys();
       if (loadedKeys != null && !loadedKeys.isEmpty()) {
         Map<Data, Object> partitionKeys = new HashMap<Data, Object>();
         Iterator<Map.Entry<Data, Object>> iterator = loadedKeys.entrySet().iterator();
         while (iterator.hasNext()) {
           final Map.Entry<Data, Object> entry = iterator.next();
           final Data data = entry.getKey();
           if (partitionId == nodeEngine.getPartitionService().getPartitionId(data)) {
             partitionKeys.put(data, entry.getValue());
             // split into chunks
             if (partitionKeys.size() >= mapLoadChunkSize) {
               chunks.add(partitionKeys);
               partitionKeys = new HashMap<Data, Object>();
             }
             iterator.remove();
           }
         }
         if (!partitionKeys.isEmpty()) {
           chunks.add(partitionKeys);
         }
         if (!chunks.isEmpty()) {
           try {
             Map<Data, Object> chunkedKeys;
             final AtomicInteger checkIfMapLoaded = new AtomicInteger(chunks.size());
             while ((chunkedKeys = chunks.poll()) != null) {
               nodeEngine
                   .getExecutionService()
                   .submit("hz:map-load", new MapLoadAllTask(chunkedKeys, checkIfMapLoaded));
             }
           } catch (Throwable t) {
             throw ExceptionUtil.rethrow(t);
           }
         } else {
           loaded.set(true);
         }
       } else {
         loaded.set(true);
       }
     }
   } else {
     loaded.set(true);
   }
 }
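Example #10 splits the initial MapStore keys owned by this partition into chunks of MAP_LOAD_CHUNK_SIZE and submits one "hz:map-load" task per chunk, sharing an AtomicInteger so the final chunk can mark loading complete. Schematically (MapLoadAllTask's internals are not shown in the excerpt, so the task body here is an assumption):

  final AtomicInteger remainingChunks = new AtomicInteger(chunks.size());
  Map<Data, Object> chunk;
  while ((chunk = chunks.poll()) != null) {
    final Map<Data, Object> keys = chunk;
    nodeEngine.getExecutionService().submit("hz:map-load", new Runnable() {
      public void run() {
        // load `keys` through the configured MapStore (assumed), then:
        if (remainingChunks.decrementAndGet() == 0) {
          loaded.set(true);             // last finishing chunk flips the flag
        }
      }
    });
  }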