@Start(priority = 9) // Start before cache loader manager
 @SuppressWarnings("unused")
 private void start() {
   final int concurrencyLevel = configuration.locking().concurrencyLevel();
   localTransactions =
       ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
   globalToLocalTransactions =
       ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
   if (configuration.clustering().cacheMode().isClustered()) {
     minTopologyRecalculationLock = new ReentrantLock();
     // Only initialize this if we are clustered.
     remoteTransactions =
         ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
     cleanupService.start(cacheName, rpcManager, configuration);
     notifier.addListener(cleanupService);
     notifier.addListener(this);
     clustered = true;
   }
 }
Example #2
 @Start
 private void start() {
   final int concurrencyLevel = configuration.locking().concurrencyLevel();
   localTransactions =
       ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
   if (configuration.clustering().cacheMode().isClustered()) {
     minViewRecalculationLock = new ReentrantLock();
     // Only initialize this if we are clustered.
     remoteTransactions =
         ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
     cleanupService.start(cacheName, rpcManager, invoker);
     cm.addListener(cleanupService);
     cm.addListener(this);
     notifier.addListener(cleanupService);
     minTxViewId = rpcManager.getTransport().getViewId();
     currentViewId = minTxViewId;
     log.debugf("Min view id set to %s", minTxViewId);
     clustered = true;
   }
 }
Example #3
 public L1ManagerImpl() {
   requestors = ConcurrentMapFactory.makeConcurrentMap();
 }
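
All of the snippets above go through ConcurrentMapFactory.makeConcurrentMap. As a hedged, minimal sketch (ConcurrentMapFactory is Infinispan's internal factory; the delegation shown here is an assumption), the three-argument call maps onto the JDK ConcurrentHashMap constructor taking an initial capacity, load factor, and concurrency level:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ConcurrentMapSketch {
  // Hedged sketch: a call such as ConcurrentMapFactory.makeConcurrentMap(level, 0.75f, level)
  // is assumed to delegate to ConcurrentHashMap(initialCapacity, loadFactor, concurrencyLevel).
  static <K, V> ConcurrentMap<K, V> makeConcurrentMap(int concurrencyLevel) {
    return new ConcurrentHashMap<K, V>(concurrencyLevel, 0.75f, concurrencyLevel);
  }
}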
Example #4
/**
 * An encapsulation of a JGroups transport. JGroups transports can be configured using a variety of
 * methods, usually by passing in one of the following properties:
 *
 * <ul>
 *   <li><tt>configurationString</tt> - a JGroups configuration String
 *   <li><tt>configurationXml</tt> - JGroups configuration XML as a String
 *   <li><tt>configurationFile</tt> - String pointing to a JGroups XML configuration file
 *   <li><tt>channelLookup</tt> - Fully qualified class name of a {@link
 *       org.infinispan.remoting.transport.jgroups.JGroupsChannelLookup} instance
 * </ul>
 *
 * These are normally passed in as Properties in {@link
 * org.infinispan.config.GlobalConfiguration#setTransportProperties(java.util.Properties)} or in the
 * Infinispan XML configuration file.
 *
 * @author Manik Surtani
 * @author Galder Zamarreño
 * @since 4.0
 */
public class JGroupsTransport extends AbstractTransport implements MembershipListener {
  public static final String CONFIGURATION_STRING = "configurationString";
  public static final String CONFIGURATION_XML = "configurationXml";
  public static final String CONFIGURATION_FILE = "configurationFile";
  public static final String CHANNEL_LOOKUP = "channelLookup";
  protected static final String DEFAULT_JGROUPS_CONFIGURATION_FILE = "jgroups-udp.xml";

  static final Log log = LogFactory.getLog(JGroupsTransport.class);
  static final boolean trace = log.isTraceEnabled();
  final ConcurrentMap<String, StateTransferMonitor> stateTransfersInProgress =
      ConcurrentMapFactory.makeConcurrentMap();

  protected boolean startChannel = true, stopChannel = true;
  private CommandAwareRpcDispatcher dispatcher;
  protected TypedProperties props;
  protected InboundInvocationHandler inboundInvocationHandler;
  protected StreamingMarshaller marshaller;
  protected ExecutorService asyncExecutor;
  protected CacheManagerNotifier notifier;

  private boolean globalStatsEnabled;
  private MBeanServer mbeanServer;
  private String domain;

  protected Channel channel;
  protected Address address;
  protected Address physicalAddress;

  // these members are not valid until we have received the first view on a second thread
  // and channelConnectedLatch is signaled
  protected volatile List<Address> members = null;
  protected volatile Address coordinator = null;
  protected volatile boolean isCoordinator = false;
  protected CountDownLatch channelConnectedLatch = new CountDownLatch(1);

  /**
   * This form is used when the transport is created by an external source and passed in to the
   * GlobalConfiguration.
   *
   * @param channel a created, but not yet connected, channel to use
   */
  public JGroupsTransport(Channel channel) {
    this.channel = channel;
    if (channel == null) throw new IllegalArgumentException("Cannot deal with a null channel!");
    if (channel.isConnected())
      throw new IllegalArgumentException("Channel passed in cannot already be connected!");
  }
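
  // Hedged usage sketch for the constructor above (the file name and wiring are illustrative,
  // not taken from this codebase):
  //   JChannel externalChannel = new JChannel("my-jgroups.xml"); // created but NOT connected
  //   Transport transport = new JGroupsTransport(externalChannel);
  //   // the transport instance is then handed to the GlobalConfiguration, as the javadoc explains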

  public JGroupsTransport() {}

  @Override
  public Log getLog() {
    return log;
  }

  // ------------------------------------------------------------------------------------------------------------------
  // Lifecycle and setup stuff
  // ------------------------------------------------------------------------------------------------------------------

  @Override
  public void initialize(
      @ComponentName(GLOBAL_MARSHALLER) StreamingMarshaller marshaller,
      ExecutorService asyncExecutor,
      InboundInvocationHandler inboundInvocationHandler,
      CacheManagerNotifier notifier) {
    this.marshaller = marshaller;
    this.asyncExecutor = asyncExecutor;
    this.inboundInvocationHandler = inboundInvocationHandler;
    this.notifier = notifier;
  }

  @Override
  public void start() {
    props = TypedProperties.toTypedProperties(configuration.getTransportProperties());

    if (log.isInfoEnabled()) log.startingJGroupsChannel();

    initChannelAndRPCDispatcher();
    startJGroupsChannelIfNeeded();

    waitForChannelToConnect();
  }

  protected void startJGroupsChannelIfNeeded() {
    if (startChannel) {
      String clusterName = configuration.getClusterName();
      try {
        channel.connect(clusterName);
      } catch (Exception e) {
        throw new CacheException("Unable to start JGroups Channel", e);
      }

      try {
        // Normally this would be done by CacheManagerJmxRegistration, but the channel is not
        // started when the cache manager starts; it only starts when the first cache starts,
        // so it's safer to do it here.
        globalStatsEnabled = configuration.isExposeGlobalJmxStatistics();
        if (globalStatsEnabled) {
          String groupName =
              String.format("type=channel,cluster=%s", ObjectName.quote(clusterName));
          mbeanServer = JmxUtil.lookupMBeanServer(configuration);
          domain = JmxUtil.buildJmxDomain(configuration, mbeanServer, groupName);
          JmxConfigurator.registerChannel(
              (JChannel) channel, mbeanServer, domain, clusterName, true);
        }
      } catch (Exception e) {
        throw new CacheException("Channel connected, but unable to register MBeans", e);
      }
    }
    address = fromJGroupsAddress(channel.getAddress());
    if (!startChannel) {
      // the channel was already started externally, we need to initialize our member list
      viewAccepted(channel.getView());
    }
    if (log.isInfoEnabled()) log.localAndPhysicalAddress(getAddress(), getPhysicalAddresses());
  }

  @Override
  public int getViewId() {
    if (channel == null)
      throw new CacheException("The cache has been stopped and invocations are not allowed!");
    View view = channel.getView();
    if (view == null) return -1;
    return (int) view.getVid().getId();
  }

  @Override
  public void stop() {
    try {
      if (stopChannel && channel != null && channel.isOpen()) {
        log.disconnectAndCloseJGroups();

        // Unregistering before disconnecting/closing because
        // after that the cluster name is null
        if (globalStatsEnabled) {
          JmxConfigurator.unregisterChannel(
              (JChannel) channel, mbeanServer, domain, channel.getClusterName());
        }

        channel.disconnect();
        channel.close();
      }
    } catch (Exception toLog) {
      log.problemClosingChannel(toLog);
    }

    channel = null;
    if (dispatcher != null) {
      log.stoppingRpcDispatcher();
      dispatcher.stop();
    }

    members = Collections.emptyList();
    coordinator = null;
    isCoordinator = false;
    dispatcher = null;
  }

  protected void initChannel() {
    if (channel == null) {
      buildChannel();
      String transportNodeName = configuration.getTransportNodeName();
      if (transportNodeName != null && transportNodeName.length() > 0) {
        long range = Short.MAX_VALUE * 2;
        long randomInRange = (long) ((Math.random() * range) % range) + 1;
        transportNodeName = transportNodeName + "-" + randomInRange;
        channel.setName(transportNodeName);
      }
    }

    // Discard our own messages so that invalidations targeted at remote instances are not
    // also received by self.
    channel.setDiscardOwnMessages(true);

    // if we have a TopologyAwareConsistentHash, we need to set our own address generator in
    // JGroups
    if (configuration.hasTopologyInfo()) {
      // We can do this only if the channel hasn't been started already
      if (startChannel) {
        ((JChannel) channel)
            .setAddressGenerator(
                new AddressGenerator() {

                  @Override
                  public org.jgroups.Address generateAddress() {
                    return TopologyUUID.randomUUID(
                        channel.getName(),
                        configuration.getSiteId(),
                        configuration.getRackId(),
                        configuration.getMachineId());
                  }
                });
      } else {
        if (channel.getAddress() instanceof TopologyUUID) {
          TopologyUUID topologyAddress = (TopologyUUID) channel.getAddress();
          if (!configuration.getSiteId().equals(topologyAddress.getSiteId())
              || !configuration.getRackId().equals(topologyAddress.getRackId())
              || !configuration.getMachineId().equals(topologyAddress.getMachineId())) {
            throw new CacheException(
                "Topology information does not match the one set by the provided JGroups channel");
          }
        } else {
          throw new CacheException("JGroups address does not contain topology coordinates");
        }
      }
    }
  }

  private void initChannelAndRPCDispatcher() throws CacheException {
    initChannel();
    dispatcher =
        new CommandAwareRpcDispatcher(channel, this, asyncExecutor, inboundInvocationHandler);
    MarshallerAdapter adapter = new MarshallerAdapter(marshaller);
    dispatcher.setRequestMarshaller(adapter);
    dispatcher.setResponseMarshaller(adapter);
    dispatcher.start();
  }

  // This is per cache manager, so the classloader in use should be the cache manager's classloader
  private void buildChannel() {
    // In order of preference we first look for a channel lookup, then an external JGroups file,
    // then a set of XML properties, and finally the legacy JGroups String properties.
    String cfg;
    if (props != null) {
      if (props.containsKey(CHANNEL_LOOKUP)) {
        String channelLookupClassName = props.getProperty(CHANNEL_LOOKUP);

        try {
          JGroupsChannelLookup lookup =
              (JGroupsChannelLookup)
                  Util.getInstance(channelLookupClassName, configuration.getClassLoader());
          channel = lookup.getJGroupsChannel(props);
          startChannel = lookup.shouldStartAndConnect();
          stopChannel = lookup.shouldStopAndDisconnect();
        } catch (ClassCastException e) {
          log.wrongTypeForJGroupsChannelLookup(channelLookupClassName, e);
          throw new CacheException(e);
        } catch (Exception e) {
          log.errorInstantiatingJGroupsChannelLookup(channelLookupClassName, e);
          throw new CacheException(e);
        }
        if (configuration.isStrictPeerToPeer() && !startChannel) {
          log.warnStrictPeerToPeerWithInjectedChannel();
        }
      }

      if (channel == null && props.containsKey(CONFIGURATION_FILE)) {
        cfg = props.getProperty(CONFIGURATION_FILE);
        try {
          channel =
              new JChannel(
                  FileLookupFactory.newInstance()
                      .lookupFileLocation(cfg, configuration.getClassLoader()));
        } catch (Exception e) {
          log.errorCreatingChannelFromConfigFile(cfg);
          throw new CacheException(e);
        }
      }

      if (channel == null && props.containsKey(CONFIGURATION_XML)) {
        cfg = props.getProperty(CONFIGURATION_XML);
        try {
          channel = new JChannel(XmlConfigHelper.stringToElement(cfg));
        } catch (Exception e) {
          log.errorCreatingChannelFromXML(cfg);
          throw new CacheException(e);
        }
      }

      if (channel == null && props.containsKey(CONFIGURATION_STRING)) {
        cfg = props.getProperty(CONFIGURATION_STRING);
        try {
          channel = new JChannel(cfg);
        } catch (Exception e) {
          log.errorCreatingChannelFromConfigString(cfg);
          throw new CacheException(e);
        }
      }
    }

    if (channel == null) {
      log.unableToUseJGroupsPropertiesProvided(props);
      try {
        channel =
            new JChannel(
                FileLookupFactory.newInstance()
                    .lookupFileLocation(
                        DEFAULT_JGROUPS_CONFIGURATION_FILE, configuration.getClassLoader()));
      } catch (Exception e) {
        throw new CacheException("Unable to start JGroups channel", e);
      }
    }
  }

  // ------------------------------------------------------------------------------------------------------------------
  // querying cluster status
  // ------------------------------------------------------------------------------------------------------------------

  @Override
  public boolean isCoordinator() {
    return isCoordinator;
  }

  @Override
  public Address getCoordinator() {
    return coordinator;
  }

  public void waitForChannelToConnect() {
    if (channel == null) return;
    log.debug("Waiting on view being accepted");
    try {
      channelConnectedLatch.await();
    } catch (InterruptedException e) {
      log.interruptedWaitingForCoordinator(e);
    }
  }

  @Override
  public List<Address> getMembers() {
    return members != null ? members : Collections.<Address>emptyList();
  }

  @Override
  public boolean isMulticastCapable() {
    return channel.getProtocolStack().getTransport().supportsMulticasting();
  }

  @Override
  public Address getAddress() {
    if (address == null && channel != null) {
      address = fromJGroupsAddress(channel.getAddress());
    }
    return address;
  }

  @Override
  public List<Address> getPhysicalAddresses() {
    if (physicalAddress == null && channel != null) {
      org.jgroups.Address addr =
          (org.jgroups.Address)
              channel.down(new Event(Event.GET_PHYSICAL_ADDRESS, channel.getAddress()));
      if (addr == null) {
        return Collections.emptyList();
      }
      physicalAddress = new JGroupsAddress(addr);
    }
    return Collections.singletonList(physicalAddress);
  }

  // ------------------------------------------------------------------------------------------------------------------
  // outbound RPC
  // ------------------------------------------------------------------------------------------------------------------

  @Override
  public Map<Address, Response> invokeRemotely(
      Collection<Address> recipients,
      ReplicableCommand rpcCommand,
      ResponseMode mode,
      long timeout,
      boolean usePriorityQueue,
      ResponseFilter responseFilter,
      boolean supportReplay)
      throws Exception {

    if (recipients != null && recipients.isEmpty()) {
      // don't send if dest list is empty
      log.trace("Destination list is empty: no need to send message");
      return Collections.emptyMap();
    }

    if (trace)
      log.tracef(
          "dests=%s, command=%s, mode=%s, timeout=%s", recipients, rpcCommand, mode, timeout);

    if (mode.isSynchronous() && recipients != null && !getMembers().containsAll(recipients)) {
      if (mode == ResponseMode.SYNCHRONOUS)
        throw new SuspectException(
            "One or more nodes have left the cluster while replicating command " + rpcCommand);
      else { // SYNCHRONOUS_IGNORE_LEAVERS || WAIT_FOR_VALID_RESPONSE
        recipients = new ArrayList<Address>(recipients);
        recipients.retainAll(getMembers());
      }
    }
    boolean asyncMarshalling = mode == ResponseMode.ASYNCHRONOUS;
    if (!usePriorityQueue
        && (ResponseMode.SYNCHRONOUS == mode || ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS == mode))
      usePriorityQueue = true;

    boolean broadcast = recipients == null || recipients.size() == members.size();
    RspList<Object> rsps =
        dispatcher.invokeRemoteCommands(
            toJGroupsAddressList(recipients),
            rpcCommand,
            toJGroupsMode(mode),
            timeout,
            recipients != null,
            usePriorityQueue,
            toJGroupsFilter(responseFilter),
            supportReplay,
            asyncMarshalling,
            broadcast);

    if (mode.isAsynchronous()) return Collections.emptyMap(); // async case

    // short-circuit no-return-value calls.
    if (rsps == null) return Collections.emptyMap();
    Map<Address, Response> retval = new HashMap<Address, Response>(rsps.size());

    boolean ignoreLeavers =
        mode == ResponseMode.SYNCHRONOUS_IGNORE_LEAVERS
            || mode == ResponseMode.WAIT_FOR_VALID_RESPONSE;
    boolean noValidResponses = true;
    for (Rsp<Object> rsp : rsps.values()) {
      noValidResponses &=
          parseResponseAndAddToResponseList(
              rsp.getValue(),
              rsp.getException(),
              retval,
              rsp.wasSuspected(),
              rsp.wasReceived(),
              fromJGroupsAddress(rsp.getSender()),
              responseFilter != null,
              ignoreLeavers);
    }

    if (noValidResponses) throw new TimeoutException("Timed out waiting for valid responses!");
    return retval;
  }

  private static org.jgroups.blocks.ResponseMode toJGroupsMode(ResponseMode mode) {
    switch (mode) {
      case ASYNCHRONOUS:
      case ASYNCHRONOUS_WITH_SYNC_MARSHALLING:
        return org.jgroups.blocks.ResponseMode.GET_NONE;
      case SYNCHRONOUS:
      case SYNCHRONOUS_IGNORE_LEAVERS:
        return org.jgroups.blocks.ResponseMode.GET_ALL;
      case WAIT_FOR_VALID_RESPONSE:
        return org.jgroups.blocks.ResponseMode.GET_FIRST;
    }
    throw new CacheException("Unknown response mode " + mode);
  }

  private RspFilter toJGroupsFilter(ResponseFilter responseFilter) {
    return responseFilter == null ? null : new JGroupsResponseFilterAdapter(responseFilter);
  }

  // ------------------------------------------------------------------------------------------------------------------
  // Implementations of JGroups interfaces
  // ------------------------------------------------------------------------------------------------------------------

  private interface Notify {
    void emitNotification(List<Address> oldMembers, View newView);
  }

  private class NotifyViewChange implements Notify {
    @Override
    public void emitNotification(List<Address> oldMembers, View newView) {
      notifier.notifyViewChange(members, oldMembers, getAddress(), (int) newView.getVid().getId());
    }
  }

  private class NotifyMerge implements Notify {

    @Override
    public void emitNotification(List<Address> oldMembers, View newView) {
      MergeView mv = (MergeView) newView;

      final Address address = getAddress();
      final int viewId = (int) newView.getVid().getId();
      notifier.notifyMerge(members, oldMembers, address, viewId, getSubgroups(mv.getSubgroups()));
    }

    private List<List<Address>> getSubgroups(List<View> subviews) {
      List<List<Address>> l = new ArrayList<List<Address>>(subviews.size());
      for (View v : subviews) l.add(fromJGroupsAddressList(v.getMembers()));
      return l;
    }
  }

  @Override
  public void viewAccepted(View newView) {
    log.debugf("New view accepted: %s", newView);
    List<org.jgroups.Address> newMembers = newView.getMembers();
    if (newMembers == null || newMembers.isEmpty()) {
      log.debugf("Received null or empty member list from JGroups channel: " + newView);
      return;
    }

    List<Address> oldMembers = members;
    // we need a defensive copy anyway
    members = fromJGroupsAddressList(newMembers);

    // Now that we have a view, figure out if we are the coordinator
    coordinator = fromJGroupsAddress(newView.getCreator());
    isCoordinator = coordinator != null && coordinator.equals(getAddress());

    // Wake up any threads that are waiting to know who the coordinator is
    // do it before the notifications, so if a listener throws an exception we can still start
    channelConnectedLatch.countDown();

    // now notify listeners - *after* updating the coordinator. - JBCACHE-662
    boolean hasNotifier = notifier != null;
    if (hasNotifier) {
      Notify n;
      if (newView instanceof MergeView) {
        if (log.isInfoEnabled()) log.receivedMergedView(newView);
        n = new NotifyMerge();
      } else {
        if (log.isInfoEnabled()) log.receivedClusterView(newView);
        n = new NotifyViewChange();
      }

      n.emitNotification(oldMembers, newView);
    }
  }

  @Override
  public void suspect(org.jgroups.Address suspected_mbr) {
    // no-op
  }

  @Override
  public void block() {
    // no-op since ISPN-83 has been resolved
  }

  @Override
  public void unblock() {
    // no-op since ISPN-83 has been resolved
  }

  // ------------------------------------------------------------------------------------------------------------------
  // Helpers to convert between Address types
  // ------------------------------------------------------------------------------------------------------------------

  protected static org.jgroups.Address toJGroupsAddress(Address a) {
    return ((JGroupsAddress) a).address;
  }

  static Address fromJGroupsAddress(org.jgroups.Address addr) {
    if (addr instanceof TopologyUUID) return new JGroupsTopologyAwareAddress((TopologyUUID) addr);
    else return new JGroupsAddress(addr);
  }

  private static List<org.jgroups.Address> toJGroupsAddressList(Collection<Address> list) {
    if (list == null) return null;
    if (list.isEmpty()) return Collections.emptyList();

    List<org.jgroups.Address> retval = new LinkedList<org.jgroups.Address>();
    for (Address a : list) retval.add(toJGroupsAddress(a));

    return retval;
  }

  private static List<Address> fromJGroupsAddressList(List<org.jgroups.Address> list) {
    if (list == null || list.isEmpty()) return Collections.emptyList();

    List<Address> retval = new ArrayList<Address>(list.size());
    for (org.jgroups.Address a : list) retval.add(fromJGroupsAddress(a));
    return Collections.unmodifiableList(retval);
  }

  // mainly for unit testing

  public CommandAwareRpcDispatcher getCommandAwareRpcDispatcher() {
    return dispatcher;
  }

  public Channel getChannel() {
    return channel;
  }
}
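
The class javadoc above lists the transport properties that buildChannel() understands. Below is a minimal, hedged sketch of supplying them through setTransportProperties on the legacy org.infinispan.config.GlobalConfiguration referenced in that javadoc; the channel-lookup class name is hypothetical.

import java.util.Properties;

import org.infinispan.config.GlobalConfiguration;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;

public class TransportPropertiesSketch {
  // Hedged sketch: only one of these properties is normally needed; buildChannel() above
  // checks channelLookup first, then configurationFile, configurationXml and configurationString.
  public static void configureTransport(GlobalConfiguration gc) {
    Properties p = new Properties();
    // Point the transport at an external JGroups XML file on the classpath.
    p.setProperty(JGroupsTransport.CONFIGURATION_FILE, "jgroups-udp.xml");
    // Alternatively, delegate channel creation to a custom lookup (hypothetical class name):
    // p.setProperty(JGroupsTransport.CHANNEL_LOOKUP, "com.example.MyChannelLookup");
    gc.setTransportProperties(p); // documented in the class javadoc above
  }
}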
Example #5
/**
 * This interceptor will be created when the System Property "infinispan.query.indexLocalOnly" is
 * "false"
 *
 * <p>This type of interceptor will allow the indexing of data even when it comes from other caches
 * within a cluster.
 *
 * <p>However, if the a cache would not be putting the data locally, the interceptor will not index
 * it.
 *
 * @author Navin Surtani
 * @author Sanne Grinovero <*****@*****.**> (C) 2011 Red Hat Inc.
 * @author Marko Luksa
 * @since 4.0
 */
public class QueryInterceptor extends CommandInterceptor {

  private final SearchFactoryIntegrator searchFactory;
  private final ConcurrentMap<Class<?>, Boolean> knownClasses =
      ConcurrentMapFactory.makeConcurrentMap();
  private final Lock mutating = new ReentrantLock();
  private final KeyTransformationHandler keyTransformationHandler = new KeyTransformationHandler();
  protected TransactionManager transactionManager;
  protected TransactionSynchronizationRegistry transactionSynchronizationRegistry;
  protected ExecutorService asyncExecutor;

  private static final Log log = LogFactory.getLog(QueryInterceptor.class, Log.class);

  @Override
  protected Log getLog() {
    return log;
  }

  public QueryInterceptor(SearchFactoryIntegrator searchFactory) {
    this.searchFactory = searchFactory;
  }

  @Inject
  public void injectDependencies(
      @ComponentName(KnownComponentNames.ASYNC_TRANSPORT_EXECUTOR) ExecutorService e) {
    this.asyncExecutor = e;
  }

  protected boolean shouldModifyIndexes(InvocationContext ctx) {
    return !ctx.hasFlag(Flag.SKIP_INDEXING);
  }

  /**
   * Use this executor for async operations.
   *
   * @return the executor service used for async operations
   */
  public ExecutorService getAsyncExecutor() {
    return asyncExecutor;
  }

  @Override
  public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command)
      throws Throwable {

    // This method will get the put() calls on the cache and then send them into Lucene once it's
    // successful.
    // do the actual put first.
    Object toReturn = invokeNextInterceptor(ctx, command);

    if (shouldModifyIndexes(ctx)) {
      // First check whether the key is already in the cache. If it isn't, we can add the key
      // without any problem; otherwise we need to update the indexes rather than simply add to them.
      getLog().debug("Infinispan Query indexing is triggered");
      Object key = command.getKey();
      Object value = extractValue(command.getValue());

      if (updateKnownTypesIfNeeded(value)) {
        // This means that the entry is just modified so we need to update the indexes and not add
        // to them.
        updateIndexes(value, extractValue(key));
      } else {
        if (updateKnownTypesIfNeeded(toReturn)) {
          removeFromIndexes(toReturn, extractValue(command.getKey()));
        }
      }
    }
    return toReturn;
  }

  @Override
  public Object visitRemoveCommand(InvocationContext ctx, RemoveCommand command) throws Throwable {
    // remove the object out of the cache first.
    Object valueRemoved = invokeNextInterceptor(ctx, command);

    if (command.isSuccessful() && !command.isNonExistent() && shouldModifyIndexes(ctx)) {
      Object value = extractValue(valueRemoved);
      if (updateKnownTypesIfNeeded(value)) {
        removeFromIndexes(value, extractValue(command.getKey()));
      }
    }
    return valueRemoved;
  }

  @Override
  public Object visitReplaceCommand(InvocationContext ctx, ReplaceCommand command)
      throws Throwable {
    Object valueReplaced = invokeNextInterceptor(ctx, command);
    if (valueReplaced != null && command.isSuccessful() && shouldModifyIndexes(ctx)) {

      Object[] parameters = command.getParameters();
      Object p1 = extractValue(parameters[1]);
      Object p2 = extractValue(parameters[2]);
      boolean originalIsIndexed = updateKnownTypesIfNeeded(p1);
      boolean newValueIsIndexed = updateKnownTypesIfNeeded(p2);
      Object key = extractValue(command.getKey());

      if (p1 != null && originalIsIndexed) {
        removeFromIndexes(p1, key);
      }
      if (newValueIsIndexed) {
        updateIndexes(p2, key);
      }
    }

    return valueReplaced;
  }

  @Override
  public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
    Object mapPut = invokeNextInterceptor(ctx, command);

    if (shouldModifyIndexes(ctx)) {
      Map<Object, Object> dataMap = command.getMap();

      // Loop through all the keys and put those key, value pairings into lucene.

      for (Map.Entry entry : dataMap.entrySet()) {
        Object value = extractValue(entry.getValue());
        if (updateKnownTypesIfNeeded(value)) {
          updateIndexes(value, extractValue(entry.getKey()));
        }
      }
    }
    return mapPut;
  }

  @Override
  public Object visitClearCommand(InvocationContext ctx, ClearCommand command) throws Throwable {

    // This method is called when somebody calls a cache.clear() and we will need to wipe everything
    // in the indexes.
    Object returnValue = invokeNextInterceptor(ctx, command);

    if (shouldModifyIndexes(ctx)) {
      if (getLog().isTraceEnabled())
        getLog().trace("shouldModifyIndexes() is true and we can clear the indexes");

      for (Class c : this.knownClasses.keySet()) {
        EntityIndexBinder binder = this.searchFactory.getIndexBindingForEntity(c);
        if (binder != null) { // check as not all known classes are indexed
          searchFactory
              .getWorker()
              .performWork(
                  new Work<Object>(c, (Serializable) null, WorkType.PURGE_ALL),
                  new TransactionalEventTransactionContext(
                      transactionManager, transactionSynchronizationRegistry));
        }
      }
    }
    return returnValue;
  }

  // Method that will be called when data needs to be removed from Lucene.
  protected void removeFromIndexes(Object value, Object key) {

    // The key here is the String representation of the key that is stored in the cache.
    // The key is going to be the documentID for Lucene.
    // The object parameter is the actual value that needs to be removed from lucene.
    if (value == null) throw new NullPointerException("Cannot handle a null value!");
    TransactionContext transactionContext =
        new TransactionalEventTransactionContext(
            transactionManager, transactionSynchronizationRegistry);
    searchFactory
        .getWorker()
        .performWork(
            new Work<Object>(value, keyToString(key), WorkType.DELETE), transactionContext);
  }

  protected void updateIndexes(Object value, Object key) {
    // The key here is the String representation of the key that is stored in the cache.
    // The key is going to be the documentID for Lucene.
    // The value parameter is the actual value that needs to be updated in Lucene.
    if (value == null) throw new NullPointerException("Cannot handle a null value!");
    TransactionContext transactionContext =
        new TransactionalEventTransactionContext(
            transactionManager, transactionSynchronizationRegistry);
    searchFactory
        .getWorker()
        .performWork(
            new Work<Object>(value, keyToString(key), WorkType.UPDATE), transactionContext);
  }

  private Object extractValue(Object wrappedValue) {
    if (wrappedValue instanceof MarshalledValue) return ((MarshalledValue) wrappedValue).get();
    else return wrappedValue;
  }

  public void enableClasses(Class<?>[] classes) {
    if (classes == null || classes.length == 0) {
      return;
    }
    enableClassesIncrementally(classes, false);
  }

  private void enableClassesIncrementally(Class<?>[] classes, boolean locked) {
    ArrayList<Class<?>> toAdd = null;
    for (Class<?> type : classes) {
      if (!knownClasses.containsKey(type)) {
        if (toAdd == null) toAdd = new ArrayList<Class<?>>(classes.length);
        toAdd.add(type);
      }
    }
    if (toAdd == null) {
      return;
    }
    if (locked) {
      Set<Class<?>> existingClasses = knownClasses.keySet();
      int index = existingClasses.size();
      Class[] all = existingClasses.toArray(new Class[existingClasses.size() + toAdd.size()]);
      for (Class<?> toAddClass : toAdd) {
        all[index++] = toAddClass;
      }
      searchFactory.addClasses(all);
      for (Class<?> type : toAdd) {
        if (searchFactory.getIndexBindingForEntity(type) != null) {
          knownClasses.put(type, Boolean.TRUE);
        } else {
          knownClasses.put(type, Boolean.FALSE);
        }
      }
    } else {
      mutating.lock();
      try {
        enableClassesIncrementally(classes, true);
      } finally {
        mutating.unlock();
      }
    }
  }

  private boolean updateKnownTypesIfNeeded(Object value) {
    if (value != null) {
      Class<?> potentialNewType = value.getClass();
      if (!this.knownClasses.containsKey(potentialNewType)) {
        mutating.lock();
        try {
          enableClassesIncrementally(new Class[] {potentialNewType}, true);
        } finally {
          mutating.unlock();
        }
      }
      return this.knownClasses.get(potentialNewType).booleanValue();
    } else {
      return false;
    }
  }

  public void registerKeyTransformer(
      Class<?> keyClass, Class<? extends Transformer> transformerClass) {
    keyTransformationHandler.registerTransformer(keyClass, transformerClass);
  }

  private String keyToString(Object key) {
    return keyTransformationHandler.keyToString(key);
  }

  public KeyTransformationHandler getKeyTransformationHandler() {
    return keyTransformationHandler;
  }
}
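
Since shouldModifyIndexes() above simply checks Flag.SKIP_INDEXING, callers can bypass indexing for a single write. A small, hedged sketch of that usage follows; the cache handle, key and value are placeholders.

import org.infinispan.AdvancedCache;
import org.infinispan.Cache;
import org.infinispan.context.Flag;

public class SkipIndexingSketch {
  // Hedged sketch: a write issued with SKIP_INDEXING makes shouldModifyIndexes() return false,
  // so QueryInterceptor leaves the Lucene indexes untouched for that operation.
  public static void putWithoutIndexing(Cache<String, Object> cache, String key, Object value) {
    AdvancedCache<String, Object> advanced = cache.getAdvancedCache();
    advanced.withFlags(Flag.SKIP_INDEXING).put(key, value);
  }
}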
/**
 * Repository for {@link RemoteTransaction} and {@link
 * org.infinispan.transaction.xa.TransactionXaAdapter}s (locally originated transactions).
 *
 * @author [email protected]
 * @author Galder Zamarreño
 * @since 4.0
 */
@Listener
public class TransactionTable {

  public static final int CACHE_STOPPED_TOPOLOGY_ID = -1;
  private static final Log log = LogFactory.getLog(TransactionTable.class);

  private ConcurrentMap<Transaction, LocalTransaction> localTransactions;
  private ConcurrentMap<GlobalTransaction, LocalTransaction> globalToLocalTransactions;
  private ConcurrentMap<GlobalTransaction, RemoteTransaction> remoteTransactions;

  private final StaleTransactionCleanupService cleanupService =
      new StaleTransactionCleanupService(this);

  protected Configuration configuration;
  protected InvocationContextContainer icc;
  protected TransactionCoordinator txCoordinator;
  protected TransactionFactory txFactory;
  protected RpcManager rpcManager;
  protected CommandsFactory commandsFactory;
  private InterceptorChain invoker;
  private CacheNotifier notifier;
  private TransactionSynchronizationRegistry transactionSynchronizationRegistry;
  protected ClusteringDependentLogic clusteringLogic;
  protected boolean clustered = false;
  private Lock minTopologyRecalculationLock;
  private final ConcurrentMap<GlobalTransaction, Long> completedTransactions =
      ConcurrentMapFactory.makeConcurrentMap();

  /**
   * minTxTopologyId is the minimum topology ID across all ongoing local and remote transactions.
   */
  private volatile int minTxTopologyId = CACHE_STOPPED_TOPOLOGY_ID;

  private volatile int currentTopologyId = CACHE_STOPPED_TOPOLOGY_ID;
  private volatile boolean useStrictTopologyIdComparison = true;
  private String cacheName;

  @Inject
  public void initialize(
      RpcManager rpcManager,
      Configuration configuration,
      InvocationContextContainer icc,
      InterceptorChain invoker,
      CacheNotifier notifier,
      TransactionFactory gtf,
      TransactionCoordinator txCoordinator,
      TransactionSynchronizationRegistry transactionSynchronizationRegistry,
      CommandsFactory commandsFactory,
      ClusteringDependentLogic clusteringDependentLogic,
      Cache cache) {
    this.rpcManager = rpcManager;
    this.configuration = configuration;
    this.icc = icc;
    this.invoker = invoker;
    this.notifier = notifier;
    this.txFactory = gtf;
    this.txCoordinator = txCoordinator;
    this.transactionSynchronizationRegistry = transactionSynchronizationRegistry;
    this.commandsFactory = commandsFactory;
    this.clusteringLogic = clusteringDependentLogic;
    this.cacheName = cache.getName();
  }

  @Start(priority = 9) // Start before cache loader manager
  @SuppressWarnings("unused")
  private void start() {
    final int concurrencyLevel = configuration.locking().concurrencyLevel();
    localTransactions =
        ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
    globalToLocalTransactions =
        ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
    if (configuration.clustering().cacheMode().isClustered()) {
      minTopologyRecalculationLock = new ReentrantLock();
      // Only initialize this if we are clustered.
      remoteTransactions =
          ConcurrentMapFactory.makeConcurrentMap(concurrencyLevel, 0.75f, concurrencyLevel);
      cleanupService.start(cacheName, rpcManager, configuration);
      notifier.addListener(cleanupService);
      notifier.addListener(this);
      clustered = true;
    }
  }

  @Stop
  @SuppressWarnings("unused")
  private void stop() {
    if (clustered) {
      notifier.removeListener(cleanupService);
      cleanupService.stop();
      notifier.removeListener(this);
      currentTopologyId = CACHE_STOPPED_TOPOLOGY_ID; // indicate that the cache has stopped
    }
    shutDownGracefully();
  }

  public Set<Object> getLockedKeysForRemoteTransaction(GlobalTransaction gtx) {
    RemoteTransaction transaction = remoteTransactions.get(gtx);
    if (transaction == null) return InfinispanCollections.emptySet();
    return transaction.getLockedKeys();
  }

  public void remoteTransactionPrepared(GlobalTransaction gtx) {
    // do nothing
  }

  public void localTransactionPrepared(LocalTransaction localTransaction) {
    // nothing, only used by recovery
  }

  public void enlist(Transaction transaction, LocalTransaction localTransaction) {
    if (!localTransaction.isEnlisted()) {
      SynchronizationAdapter sync =
          new SynchronizationAdapter(
              localTransaction,
              txCoordinator,
              commandsFactory,
              rpcManager,
              this,
              clusteringLogic,
              configuration);
      if (transactionSynchronizationRegistry != null) {
        try {
          transactionSynchronizationRegistry.registerInterposedSynchronization(sync);
        } catch (Exception e) {
          log.failedSynchronizationRegistration(e);
          throw new CacheException(e);
        }

      } else {

        try {
          transaction.registerSynchronization(sync);
        } catch (Exception e) {
          log.failedSynchronizationRegistration(e);
          throw new CacheException(e);
        }
      }
      ((SyncLocalTransaction) localTransaction).setEnlisted(true);
    }
  }

  public void failureCompletingTransaction(Transaction tx) {
    final LocalTransaction localTransaction = localTransactions.get(tx);
    if (localTransaction != null) {
      removeLocalTransaction(localTransaction);
    }
  }

  /**
   * Returns true if the given transaction is already registered with the transaction table.
   *
   * @param tx the transaction to check; if null, false is returned
   */
  public boolean containsLocalTx(Transaction tx) {
    return tx != null && localTransactions.containsKey(tx);
  }

  public int getMinTopologyId() {
    return minTxTopologyId;
  }

  /**
   * Indicates whether topology id comparisons should be strict when comparing topology ids in
   * order to tell if a transaction was started in an older topology than a second transaction. This
   * flag is true most of the time, except when the current topology did not increase its id (i.e.
   * the change was not caused by a rebalance).
   *
   * @return true if strict topology id comparisons should be used, false otherwise
   */
  public boolean useStrictTopologyIdComparison() {
    return useStrictTopologyIdComparison;
  }

  protected void updateStateOnNodesLeaving(Collection<Address> leavers) {
    Set<GlobalTransaction> toKill = new HashSet<GlobalTransaction>();
    for (GlobalTransaction gt : remoteTransactions.keySet()) {
      if (leavers.contains(gt.getAddress())) toKill.add(gt);
    }

    if (toKill.isEmpty())
      log.tracef(
          "No global transactions pertain to originator(s) %s who have left the cluster.", leavers);
    else
      log.tracef(
          "%s global transactions pertain to leavers list %s and need to be killed",
          toKill.size(), leavers);

    for (GlobalTransaction gtx : toKill) {
      log.tracef("Killing remote transaction originating on leaver %s", gtx);
      RollbackCommand rc = new RollbackCommand(cacheName, gtx);
      rc.init(invoker, icc, TransactionTable.this);
      try {
        rc.perform(null);
        log.tracef("Rollback of transaction %s complete.", gtx);
      } catch (Throwable e) {
        log.unableToRollbackGlobalTx(gtx, e);
      }
    }

    log.trace("Completed cleaning transactions originating on leavers");
  }

  /**
   * Returns the {@link RemoteTransaction} associated with the supplied transaction id. Returns null
   * if no such association exists.
   */
  public RemoteTransaction getRemoteTransaction(GlobalTransaction txId) {
    return remoteTransactions.get(txId);
  }

  public void remoteTransactionRollback(GlobalTransaction gtx) {
    final RemoteTransaction remove = removeRemoteTransaction(gtx);
    log.tracef("Removed local transaction %s? %b", gtx, remove);
  }

  /**
   * Creates and registers a {@link RemoteTransaction}. Returns the created transaction.
   *
   * @throws IllegalStateException if an attempt to create a {@link RemoteTransaction} for an
   *     already registered id is made.
   */
  public RemoteTransaction createRemoteTransaction(
      GlobalTransaction globalTx, WriteCommand[] modifications) {
    return createRemoteTransaction(globalTx, modifications, currentTopologyId);
  }

  /**
   * Creates and registers a {@link RemoteTransaction}. Returns the created transaction.
   *
   * @throws IllegalStateException if an attempt to create a {@link RemoteTransaction} for an
   *     already registered id is made.
   */
  public RemoteTransaction createRemoteTransaction(
      GlobalTransaction globalTx, WriteCommand[] modifications, int topologyId) {
    RemoteTransaction remoteTransaction =
        modifications == null
            ? txFactory.newRemoteTransaction(globalTx, topologyId)
            : txFactory.newRemoteTransaction(modifications, globalTx, topologyId);
    registerRemoteTransaction(globalTx, remoteTransaction);
    return remoteTransaction;
  }

  private void registerRemoteTransaction(GlobalTransaction gtx, RemoteTransaction rtx) {
    RemoteTransaction transaction = remoteTransactions.put(gtx, rtx);
    if (transaction != null) {
      log.remoteTxAlreadyRegistered();
      throw new IllegalStateException(
          "A remote transaction with the given id was already registered!!!");
    }

    log.tracef("Created and registered remote transaction %s", rtx);
    if (rtx.getTopologyId() < minTxTopologyId) {
      log.tracef(
          "Changing minimum topology ID from %d to %d", minTxTopologyId, rtx.getTopologyId());
      minTxTopologyId = rtx.getTopologyId();
    }
  }

  /**
   * Returns the {@link org.infinispan.transaction.xa.TransactionXaAdapter} corresponding to the
   * supplied transaction. If none exists, one is created first.
   */
  public LocalTransaction getOrCreateLocalTransaction(
      Transaction transaction, TxInvocationContext ctx) {
    LocalTransaction current = localTransactions.get(transaction);
    if (current == null) {
      Address localAddress = rpcManager != null ? rpcManager.getTransport().getAddress() : null;
      GlobalTransaction tx = txFactory.newGlobalTransaction(localAddress, false);
      current =
          txFactory.newLocalTransaction(
              transaction, tx, ctx.isImplicitTransaction(), currentTopologyId);
      log.tracef("Created a new local transaction: %s", current);
      localTransactions.put(transaction, current);
      globalToLocalTransactions.put(current.getGlobalTransaction(), current);
      notifier.notifyTransactionRegistered(tx, ctx);
    }
    return current;
  }

  /**
   * Removes the {@link org.infinispan.transaction.xa.TransactionXaAdapter} corresponding to the
   * given tx. Returns true if such a tx exists.
   */
  public boolean removeLocalTransaction(LocalTransaction localTransaction) {
    return localTransaction != null
        && (removeLocalTransactionInternal(localTransaction.getTransaction()) != null);
  }

  protected final LocalTransaction removeLocalTransactionInternal(Transaction tx) {
    LocalTransaction localTx = localTransactions.get(tx);
    if (localTx != null) {
      globalToLocalTransactions.remove(localTx.getGlobalTransaction());
      localTransactions.remove(tx);
      releaseResources(localTx);
    }
    return localTx;
  }

  private void releaseResources(CacheTransaction cacheTransaction) {
    if (cacheTransaction != null) {
      if (clustered) {
        recalculateMinTopologyIdIfNeeded(cacheTransaction);
      }
      log.tracef("Removed %s from transaction table.", cacheTransaction);
      cacheTransaction.notifyOnTransactionFinished();
    }
  }

  /** Removes the {@link RemoteTransaction} corresponding to the given tx. */
  public void remoteTransactionCommitted(GlobalTransaction gtx) {
    if (Configurations.isSecondPhaseAsync(configuration)) {
      removeRemoteTransaction(gtx);
    }
  }

  public final RemoteTransaction removeRemoteTransaction(GlobalTransaction txId) {
    RemoteTransaction removed = remoteTransactions.remove(txId);
    log.tracef("Removed remote transaction %s ? %s", txId, removed);
    releaseResources(removed);
    return removed;
  }

  public int getRemoteTxCount() {
    return remoteTransactions.size();
  }

  public int getLocalTxCount() {
    return localTransactions.size();
  }

  /**
   * Looks up a LocalTransaction given a GlobalTransaction.
   *
   * @param txId the global transaction identifier
   * @return the LocalTransaction or null if not found
   */
  public LocalTransaction getLocalTransaction(GlobalTransaction txId) {
    return globalToLocalTransactions.get(txId);
  }

  public LocalTransaction getLocalTransaction(Transaction tx) {
    return localTransactions.get(tx);
  }

  public boolean containRemoteTx(GlobalTransaction globalTransaction) {
    return remoteTransactions.containsKey(globalTransaction);
  }

  public Collection<RemoteTransaction> getRemoteTransactions() {
    return remoteTransactions.values();
  }

  public Collection<LocalTransaction> getLocalTransactions() {
    return localTransactions.values();
  }

  protected final void recalculateMinTopologyIdIfNeeded(CacheTransaction removedTransaction) {
    if (removedTransaction == null)
      throw new IllegalArgumentException("Transaction cannot be null!");
    if (currentTopologyId != CACHE_STOPPED_TOPOLOGY_ID) {

      // Assume that we only get here if we are clustered.
      int removedTransactionTopologyId = removedTransaction.getTopologyId();
      if (removedTransactionTopologyId < minTxTopologyId) {
        log.tracef(
            "A transaction has a topology ID (%s) that is smaller than the smallest transaction topology ID (%s) this node knows about!  This can happen if a concurrent thread recalculates the minimum topology ID after the current transaction has been removed from the transaction table.",
            removedTransactionTopologyId, minTxTopologyId);
      } else if (removedTransactionTopologyId == minTxTopologyId
          && removedTransactionTopologyId < currentTopologyId) {
        // We should only need to re-calculate the minimum topology ID if the transaction being
        // completed has the same ID as the smallest known transaction ID (to check what the new
        // smallest is), and this is not the current topology ID.
        calculateMinTopologyId(removedTransactionTopologyId);
      }
    }
  }

  @TopologyChanged
  @SuppressWarnings("unused")
  public void onTopologyChange(TopologyChangedEvent<?, ?> tce) {
    // don't do anything if this cache is not clustered
    if (clustered) {
      if (tce.isPre()) {
        useStrictTopologyIdComparison = tce.getNewTopologyId() != currentTopologyId;
        currentTopologyId = tce.getNewTopologyId();
      } else {
        log.debugf("Topology changed, recalculating minTopologyId");
        calculateMinTopologyId(-1);
      }
    }
  }

  /**
   * This method calculates the minimum topology ID known by the current node. This method is only
   * used in a clustered cache, and only invoked either when a topology change is detected or when
   * a transaction whose topology ID is not the same as the current topology ID is removed.
   *
   * <p>This method is guarded by minTopologyRecalculationLock to prevent concurrent updates to the
   * minimum topology ID field.
   *
   * @param idOfRemovedTransaction the topology ID associated with the transaction that triggered
   *     this recalculation, or -1 if triggered by a topology change event.
   */
  @GuardedBy("minTopologyRecalculationLock")
  private void calculateMinTopologyId(int idOfRemovedTransaction) {
    minTopologyRecalculationLock.lock();
    try {
      // We should only need to re-calculate the minimum topology ID if the transaction being
      // completed has the same ID as the smallest known transaction ID, to check what the new
      // smallest is. We do this check again here, since we now hold the recalculation lock.
      if (idOfRemovedTransaction == -1
          || (idOfRemovedTransaction == minTxTopologyId
              && idOfRemovedTransaction < currentTopologyId)) {
        int minTopologyIdFound = currentTopologyId;

        for (CacheTransaction ct : localTransactions.values()) {
          int topologyId = ct.getTopologyId();
          if (topologyId < minTopologyIdFound) minTopologyIdFound = topologyId;
        }
        for (CacheTransaction ct : remoteTransactions.values()) {
          int topologyId = ct.getTopologyId();
          if (topologyId < minTopologyIdFound) minTopologyIdFound = topologyId;
        }
        if (minTopologyIdFound != minTxTopologyId) {
          log.tracef(
              "Changing minimum topology ID from %s to %s", minTxTopologyId, minTopologyIdFound);
          minTxTopologyId = minTopologyIdFound;
        } else {
          log.tracef("Minimum topology ID still is %s; nothing to change", minTopologyIdFound);
        }
      }
    } finally {
      minTopologyRecalculationLock.unlock();
    }
  }

  private boolean areTxsOnGoing() {
    return !localTransactions.isEmpty()
        || (remoteTransactions != null && !remoteTransactions.isEmpty());
  }

  private void shutDownGracefully() {
    if (log.isDebugEnabled())
      log.debugf(
          "Wait for on-going transactions to finish for %s.",
          Util.prettyPrintTime(
              configuration.transaction().cacheStopTimeout(), TimeUnit.MILLISECONDS));
    long failTime = currentMillisFromNanotime() + configuration.transaction().cacheStopTimeout();
    boolean txsOnGoing = areTxsOnGoing();
    while (txsOnGoing && currentMillisFromNanotime() < failTime) {
      try {
        Thread.sleep(30);
        txsOnGoing = areTxsOnGoing();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        if (clustered) {
          log.debugf(
              "Interrupted waiting for on-going transactions to finish. %s local transactions and %s remote transactions",
              localTransactions.size(), remoteTransactions.size());
        } else {
          log.debugf(
              "Interrupted waiting for %s on-going transactions to finish.",
              localTransactions.size());
        }
      }
    }

    if (txsOnGoing) {
      log.unfinishedTransactionsRemain(
          localTransactions == null ? 0 : localTransactions.size(),
          remoteTransactions == null ? 0 : remoteTransactions.size());
    } else {
      log.debug("All transactions terminated");
    }
  }

  /**
   * With the current state transfer implementation it is possible for a transaction to be prepared
   * several times on a remote node. This might cause leaks, e.g. if the transaction is prepared,
   * committed and prepared again. Once marked as completed (because of commit or rollback), any
   * further prepares received for that transaction are discarded.
   */
  public void markTransactionCompleted(GlobalTransaction globalTx) {
    completedTransactions.put(globalTx, System.nanoTime());
  }

  /** @see #markTransactionCompleted(org.infinispan.transaction.xa.GlobalTransaction) */
  public boolean isTransactionCompleted(GlobalTransaction gtx) {
    return completedTransactions.containsKey(gtx);
  }

  public void cleanupCompletedTransactions() {
    if (!completedTransactions.isEmpty()) {
      try {
        log.tracef(
            "About to cleanup completed transaction. Initial size is %d",
            completedTransactions.size());
        // this iterator is weekly consistent and will never throw ConcurrentModificationException
        Iterator<Map.Entry<GlobalTransaction, Long>> iterator =
            completedTransactions.entrySet().iterator();
        long timeout = configuration.transaction().completedTxTimeout();

        int removedEntries = 0;
        long beginning = System.nanoTime();
        while (iterator.hasNext()) {
          Map.Entry<GlobalTransaction, Long> e = iterator.next();
          long ageNanos = System.nanoTime() - e.getValue();
          if (TimeUnit.NANOSECONDS.toMillis(ageNanos) >= timeout) {
            iterator.remove();
            removedEntries++;
          }
        }
        long duration = System.nanoTime() - beginning;

        log.tracef(
            "Finished cleaning up completed transactions. %d transactions were removed, total duration was %d millis, "
                + "current number of completed transactions is %d",
            removedEntries, TimeUnit.NANOSECONDS.toMillis(duration), completedTransactions.size());
      } catch (Exception e) {
        log.errorf(e, "Failed to cleanup completed transactions: %s", e.getMessage());
      }
    }
  }
}
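
The javadoc on markTransactionCompleted() above describes the protocol: mark a transaction completed on commit or rollback, and drop any prepare that arrives afterwards. Below is a hedged sketch of that call pattern; the handler class and its callbacks are hypothetical, only the two TransactionTable calls come from the class above.

import org.infinispan.transaction.xa.GlobalTransaction;

public class PrepareHandlerSketch {
  private final TransactionTable transactionTable;

  public PrepareHandlerSketch(TransactionTable transactionTable) {
    this.transactionTable = transactionTable;
  }

  // Hypothetical hook invoked when a prepare arrives for gtx: late prepares are discarded.
  public boolean onPrepare(GlobalTransaction gtx) {
    if (transactionTable.isTransactionCompleted(gtx)) {
      return false; // already committed or rolled back; ignore the duplicate prepare
    }
    // ... perform the actual prepare here ...
    return true;
  }

  // Hypothetical hook invoked once the transaction commits or rolls back.
  public void onCompletion(GlobalTransaction gtx) {
    transactionTable.markTransactionCompleted(gtx);
  }
}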