package org.infinispan.iteration;
  /**
   * Adds the listener using the provided filter, converter and class loader. A
   * {@code CacheInvocationBuilder} is populated internally with the resolved configuration
   * (clustered, onlyPrimary and identifier) derived from the listener's {@code @Listener}
   * annotation. A clustered listener on a distributed cache is additionally replicated to the
   * other cache members, and any requested initial state is delivered to the listener before this
   * method returns.
   *
   * @param listener the listener instance; its class must carry the {@code @Listener} annotation
   * @param filter the filter applied to events before the listener is notified, or {@code null}
   *     for no filtering
   * @param converter the converter applied to event values, or {@code null} for no conversion
   * @param classLoader the class loader to associate with the listener invocation
   * @param <C> the value type produced by the converter
   */
  public <C> void addListener(
      Object listener,
      CacheEventFilter<? super K, ? super V> filter,
      CacheEventConverter<? super K, ? super V, C> converter,
      ClassLoader classLoader) {
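    // Validate that the listener class carries the @Listener annotation and assign this
    // registration its own identifier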
    Listener l = testListenerClassValidity(listener.getClass());
    UUID generatedId = UUID.randomUUID();
    CacheMode cacheMode = config.clustering().cacheMode();
    CacheInvocationBuilder builder = new CacheInvocationBuilder();
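    // For a clustered listener, onlyPrimary follows the cache mode (true only when distributed);
    // a non-clustered listener keeps the primaryOnly value from its @Listener annotation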
    builder
        .setIncludeCurrentState(l.includeCurrentState())
        .setClustered(l.clustered())
        .setOnlyPrimary(l.clustered() ? cacheMode.isDistributed() : l.primaryOnly())
        .setFilter(filter)
        .setConverter(converter)
        .setIdentifier(generatedId)
        .setClassLoader(classLoader);
    boolean foundMethods = validateAndAddListenerInvocation(listener, builder);

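    // Cluster-wide handling only applies when at least one listener method was found and the
    // listener is marked as clustered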
    if (foundMethods && l.clustered()) {
      if (cacheMode.isInvalidation()) {
        throw new UnsupportedOperationException(
            "Cluster listeners cannot be used with Invalidation Caches!");
      } else if (cacheMode.isDistributed()) {
        clusterListenerIDs.put(listener, generatedId);
        EmbeddedCacheManager manager = cache.getCacheManager();
        Address ourAddress = manager.getAddress();

        List<Address> members = manager.getMembers();
        // If we are the only member, don't even worry about sending listeners
        if (members != null && members.size() > 1) {
          DistributedExecutionCompletionService decs =
              new DistributedExecutionCompletionService(distExecutorService);

          if (log.isTraceEnabled()) {
            log.tracef(
                "Replicating cluster listener to other nodes %s for cluster listener with id %s",
                members, generatedId);
          }
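          // The callable installs the same listener (same id, filter, converter and sync mode) on
          // the remote nodes it is submitted to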
          Callable callable =
              new ClusterListenerReplicateCallable(
                  generatedId, ourAddress, filter, converter, l.sync());
          for (Address member : members) {
            if (!member.equals(ourAddress)) {
              decs.submit(member, callable);
            }
          }

          for (int i = 0; i < members.size() - 1; ++i) {
            try {
              decs.take().get();
            } catch (InterruptedException | ExecutionException e) {
              throw new CacheListenerException(e);
            }
          }

          int extraCount = 0;
          // If anyone else joined since we sent these, we have to send the listeners again,
          // since the new members may have queried before the other nodes got the new listener
          List<Address> membersAfter = manager.getMembers();
          for (Address member : membersAfter) {
            if (!members.contains(member) && !member.equals(ourAddress)) {
              if (log.isTraceEnabled()) {
                log.tracef(
                    "Found additional node %s that joined during replication of cluster listener with id %s",
                    member, generatedId);
              }
              extraCount++;
              decs.submit(member, callable);
            }
          }

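          // Wait for the replications to the late joiners as well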
          for (int i = 0; i < extraCount; ++i) {
            try {
              decs.take().get();
            } catch (InterruptedException | ExecutionException e) {
              throw new CacheListenerException(e);
            }
          }
        }
      }
    }

    // If a segment listener handler was registered, the listener requested the initial state
    QueueingSegmentListener handler = segmentHandler.remove(generatedId);
    if (handler != null) {
      if (log.isTraceEnabled()) {
        log.tracef("Listener %s requests initial state for cache", generatedId);
      }
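      // Iterate over the current cache contents, adapting the event filter and converter to the
      // entry retriever's filter and converter types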
      try (CloseableIterator<CacheEntry<K, C>> iterator =
          entryRetriever.retrieveEntries(
              filter == null ? null : new CacheEventFilterAsKeyValueFilter(filter),
              converter == null ? null : new CacheEventConverterAsConverter(converter),
              null,
              handler)) {
        while (iterator.hasNext()) {
          CacheEntry<K, C> entry = iterator.next();
          // Mark the key as processed and see if we had a concurrent update
          Object value = handler.markKeyAsProcessing(entry.getKey());
          if (value == BaseQueueingSegmentListener.REMOVED) {
            // Don't process this value if we had a concurrent remove
            continue;
          }
          raiseEventForInitialTransfer(generatedId, entry, builder.isClustered());

          handler.notifiedKey(entry.getKey());
        }
      }

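      // Entries created while the iteration was in progress are raised to the listener afterwards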
      Set<CacheEntry> entries = handler.findCreatedEntries();

      for (CacheEntry entry : entries) {
        raiseEventForInitialTransfer(generatedId, entry, builder.isClustered());
      }

      if (log.isTraceEnabled()) {
        log.tracef("Listener %s initial state for cache completed", generatedId);
      }

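      // Signal that the initial state transfer for this listener has completed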
      handler.transferComplete();
    }
  }
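
  /*
   * Usage sketch (illustrative only, not part of the original class): registering a clustered
   * listener with a filter and a converter through this method. "notifier" stands for an instance
   * of the component containing this method, "MyClusterListener" is a hypothetical class
   * annotated with @Listener(clustered = true), and the lambdas assume the six-argument
   * accept/convert signatures of CacheEventFilter and CacheEventConverter.
   *
   *   notifier.addListener(
   *       new MyClusterListener(),
   *       (key, oldValue, oldMeta, newValue, newMeta, eventType) -> newValue != null,
   *       (key, oldValue, oldMeta, newValue, newMeta, eventType) -> String.valueOf(newValue),
   *       Thread.currentThread().getContextClassLoader());
   */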