Example #1
  public NotifyingNotifiableFuture<Object> flushCache(
      Collection<Object> keys, Object retval, Address origin) {
    if (trace) log.tracef("Invalidating L1 caches for keys %s", keys);

    NotifyingNotifiableFuture<Object> future = new AggregatingNotifyingFutureImpl(retval, 2);

    Collection<Address> invalidationAddresses = buildInvalidationAddressList(keys, origin);

    int nodes = invalidationAddresses.size();

    if (nodes > 0) {
      // No need to invalidate at all if there is no one to invalidate!
      boolean multicast = isUseMulticast(nodes);

      if (trace)
        log.tracef(
            "There are %s nodes involved in invalidation. Threshold is: %s; using multicast: %s",
            nodes, threshold, multicast);

      // Both branches send the same invalidation command; build it once.
      InvalidateCommand ic = commandsFactory.buildInvalidateFromL1Command(origin, false, keys);

      if (multicast) {
        if (trace) log.tracef("Invalidating keys %s via multicast", keys);
        rpcManager.broadcastRpcCommandInFuture(ic, future);
      } else {
        // Ask only the caches that requested these keys from us to remove them
        if (trace) log.tracef("Keys %s need invalidation on %s", keys, invalidationAddresses);
        rpcManager.invokeRemotelyInFuture(
            invalidationAddresses, ic, true, future, rpcTimeout, true);
      }
    } else if (trace) log.trace("No L1 caches to invalidate");
    return future;
  }
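
A minimal caller sketch for the method above. The enclosing component, the field name l1Manager, and the helper itself are illustrative assumptions (not part of this page), and it assumes the same imports as the example plus java.util.concurrent.ExecutionException:

  // Hypothetical caller: flush L1 copies of the keys touched by a write, then
  // block until every remote node has acknowledged the invalidation.
  private void invalidateAndWait(Collection<Object> modifiedKeys, Object returnValue, Address origin)
      throws InterruptedException, ExecutionException {
    NotifyingNotifiableFuture<Object> f = l1Manager.flushCache(modifiedKeys, returnValue, origin);
    f.get(); // rethrows (wrapped) any exception raised by a remote invalidation
  }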
Example #2
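 // Decorator-style pass-through: forwards the call unchanged to the wrapped factory ("actual").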
 @Override
 public InvalidateCommand buildInvalidateFromL1Command(
     Address origin, boolean forRehash, Set<Flag> flags, Collection<Object> keys) {
   return actual.buildInvalidateFromL1Command(origin, forRehash, flags, keys);
 }
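
Wrappers like this typically decorate a factory so the commands it creates can be observed or altered. A hypothetical use, assuming a DelegatingCommandsFactory base class (an illustrative name, not a real Infinispan class) whose constructor stores the wrapped factory and whose methods all forward like the one above, plus java.util.concurrent.atomic.AtomicInteger:

  // Hypothetical decorator: counts L1 invalidation commands as they are built,
  // then delegates to the wrapped factory unchanged.
  final AtomicInteger l1InvalidationCount = new AtomicInteger();
  CommandsFactory counting = new DelegatingCommandsFactory(actual) {
    @Override
    public InvalidateCommand buildInvalidateFromL1Command(
        Address origin, boolean forRehash, Set<Flag> flags, Collection<Object> keys) {
      l1InvalidationCount.incrementAndGet();
      return super.buildInvalidateFromL1Command(origin, forRehash, flags, keys);
    }
  };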
Example #3
  private void invalidateSegments(Set<Integer> newSegments, Set<Integer> segmentsToL1) {
    // The actual owners keep track of the nodes that hold a key in L1 ("requestors") and
    // invalidate the key on every requestor after a change.
    // But this information is only present on the owners where the ClusteredGetKeyValueCommand
    // was executed - if the requestor contacted only one owner, and that node is no longer an
    // owner (perhaps because it left the cluster), the remaining owners will not know to
    // invalidate the key on that requestor. Furthermore, the requestor list is not copied to
    // the new owners during state transfer.
    // To compensate, we delete all L1 entries in segments that changed ownership during this
    // topology update. We cannot actually distinguish L1 entries from regular entries, so we
    // delete all entries that do not belong to this node in either the current or the previous
    // topology.
    Set<Object> keysToL1 = new HashSet<Object>();
    Set<Object> keysToRemove = new HashSet<Object>();

    // gather all keys from data container that belong to the segments that are being removed/moved
    // to L1
    for (InternalCacheEntry ice : dataContainer) {
      Object key = ice.getKey();
      int keySegment = getSegment(key);
      if (segmentsToL1.contains(keySegment)) {
        keysToL1.add(key);
      } else if (!newSegments.contains(keySegment)) {
        keysToRemove.add(key);
      }
    }

    // gather all keys from cache store that belong to the segments that are being removed/moved to
    // L1
    CacheStore cacheStore = getCacheStore();
    if (cacheStore != null) {
      // TODO [anistor] extend the CacheStore interface to accept a filter when loading keys
      // (i.e. load only keys that belong to the desired segments)
      try {
        Set<Object> storedKeys =
            cacheStore.loadAllKeys(new ReadOnlyDataContainerBackedKeySet(dataContainer));
        for (Object key : storedKeys) {
          int keySegment = getSegment(key);
          if (segmentsToL1.contains(keySegment)) {
            keysToL1.add(key);
          } else if (!newSegments.contains(keySegment)) {
            keysToRemove.add(key);
          }
        }

      } catch (CacheLoaderException e) {
        log.failedLoadingKeysFromCacheStore(e);
      }
    }

    if (configuration.clustering().l1().onRehash()) {
      log.debugf("Moving to L1 state for segments %s of cache %s", segmentsToL1, cacheName);
    } else {
      log.debugf("Removing state for segments %s of cache %s", segmentsToL1, cacheName);
    }
    if (!keysToL1.isEmpty()) {
      try {
        InvalidateCommand invalidateCmd =
            commandsFactory.buildInvalidateFromL1Command(
                true, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToL1);
        InvocationContext ctx = icc.createNonTxInvocationContext();
        interceptorChain.invoke(ctx, invalidateCmd);

        log.debugf(
            "Invalidated %d keys, data container now has %d keys",
            keysToL1.size(), dataContainer.size());
        if (trace) log.tracef("Invalidated keys: %s", keysToL1);
      } catch (CacheException e) {
        log.failedToInvalidateKeys(e);
      }
    }

    log.debugf(
        "Removing L1 state for segments not in %s or %s for cache %s",
        newSegments, segmentsToL1, cacheName);
    if (!keysToRemove.isEmpty()) {
      try {
        InvalidateCommand invalidateCmd =
            commandsFactory.buildInvalidateFromL1Command(
                false, EnumSet.of(CACHE_MODE_LOCAL, SKIP_LOCKING), keysToRemove);
        InvocationContext ctx = icc.createNonTxInvocationContext();
        interceptorChain.invoke(ctx, invalidateCmd);

        log.debugf(
            "Invalidated %d keys, data container of cache %s now has %d keys",
            keysToRemove.size(), cacheName, dataContainer.size());
        if (trace) log.tracef("Invalidated keys: %s", keysToRemove);
      } catch (CacheException e) {
        log.failedToInvalidateKeys(e);
      }
    }

    // todo [anistor] call CacheNotifier.notifyDataRehashed
  }
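
The per-key decision above is applied twice, once to in-memory entries and once to store-loaded keys. A sketch of that decision factored into a single helper, using the same getSegment, newSegments and segmentsToL1 as the method above (the helper name is illustrative):

  // Hypothetical helper capturing the classification made in both loops: keys in
  // segments moving to L1 are queued for L1 invalidation; keys in segments this
  // node owns in neither topology are queued for removal; all other keys stay.
  private void classifyKey(Object key, Set<Integer> newSegments, Set<Integer> segmentsToL1,
                           Set<Object> keysToL1, Set<Object> keysToRemove) {
    int keySegment = getSegment(key);
    if (segmentsToL1.contains(keySegment)) {
      keysToL1.add(key);
    } else if (!newSegments.contains(keySegment)) {
      keysToRemove.add(key);
    }
  }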
Example #4
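 // Varargs overload of the same pass-through as Example #2, without the Address origin parameter.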
 @Override
 public InvalidateCommand buildInvalidateFromL1Command(
     boolean forRehash, Set<Flag> flags, Object... keys) {
   return actual.buildInvalidateFromL1Command(forRehash, flags, keys);
 }