@Override
  protected boolean isProperWriter(InvocationContext ctx, FlagAffectedCommand command, Object key) {
    if (command.hasFlag(Flag.SKIP_OWNERSHIP_CHECK)) return true;

    if (loaderConfig.shared()) {
      if (!dm.getPrimaryLocation(key).equals(address)) {
        log.tracef(
            "Skipping cache store since the cache loader is shared "
                + "and the caller is not the first owner of the key %s",
            key);
        return false;
      }
    } else {
      if (isUsingLockDelegation && !command.hasFlag(Flag.CACHE_MODE_LOCAL)) {
        if (ctx.isOriginLocal() && !dm.getPrimaryLocation(key).equals(address)) {
          // The command will be forwarded back to the originator, and the value will be stored then
          // (while holding the lock on the primary owner).
          log.tracef(
              "Skipping cache store on the originator because it is not the primary owner "
                  + "of key %s",
              key);
          return false;
        }
      }
      if (!dm.getWriteConsistentHash().isKeyLocalToNode(address, key)) {
        log.tracef("Skipping cache store since the key is not local: %s", key);
        return false;
      }
    }
    return true;
  }
 @Override
 public boolean hasAffinity(K key) {
   DistributionManager dist = this.cache.getAdvancedCache().getDistributionManager();
   if (dist != null) {
     DataLocality locality = dist.getLocality(key);
     return locality.isLocal() || locality.isUncertain();
   }
   return true;
 }
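A minimal usage sketch (not from the original source): a caller-side helper mirroring hasAffinity() to decide whether a key should be processed locally. LocalityAwareDispatcher is a hypothetical name; only the DistributionManager and DataLocality calls used above are assumed.

import org.infinispan.Cache;
import org.infinispan.distribution.DataLocality;
import org.infinispan.distribution.DistributionManager;

public class LocalityAwareDispatcher<K, V> {
  private final Cache<K, V> cache;

  public LocalityAwareDispatcher(Cache<K, V> cache) {
    this.cache = cache;
  }

  // Returns true when the key is (or may soon be) local, mirroring hasAffinity() above.
  public boolean preferLocalExecution(K key) {
    DistributionManager dm = cache.getAdvancedCache().getDistributionManager();
    if (dm == null) {
      return true; // non-distributed cache: everything is local
    }
    DataLocality locality = dm.getLocality(key);
    // An uncertain locality (rehash in progress) is treated as local, just as hasAffinity() does.
    return locality.isLocal() || locality.isUncertain();
  }
}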
  @Override
  public Object visitPrepareCommand(TxInvocationContext ctx, PrepareCommand command)
      throws Throwable {
    Object retVal = invokeNextInterceptor(ctx, command);

    boolean sync = isSynchronous(ctx);

    if (shouldInvokeRemoteTxCommand(ctx)) {
      int newCacheViewId = -1;
      stateTransferLock.waitForStateTransferToEnd(ctx, command, newCacheViewId);

      if (command.isOnePhaseCommit())
        flushL1Caches(ctx); // if we are one-phase, don't block on this future.

      Collection<Address> recipients = dm.getAffectedNodes(ctx.getAffectedKeys());
      prepareOnAffectedNodes(ctx, command, recipients, sync);

      ((LocalTxInvocationContext) ctx).remoteLocksAcquired(recipients);
    } else if (isL1CacheEnabled
        && command.isOnePhaseCommit()
        && !ctx.isOriginLocal()
        && !ctx.getLockedKeys().isEmpty()) {
      // We fall into this block if we are a remote node that happens to be the primary data
      // owner and has locked keys. It is still our responsibility to invalidate L1 caches in
      // the cluster.
      flushL1Caches(ctx);
    }
    return retVal;
  }
  private void sendCommitCommand(
      TxInvocationContext ctx, CommitCommand command, Collection<Address> preparedOn)
      throws TimeoutException, InterruptedException {
    // we only send the commit command to the nodes affected by the transaction's keys
    Collection<Address> recipients = dm.getAffectedNodes(ctx.getAffectedKeys());

    // By default, use the configured commit sync settings
    boolean syncCommitPhase = configuration.isSyncCommitPhase();
    for (Address a : preparedOn) {
      if (!recipients.contains(a)) {
        // However, if we have prepared on some nodes and are now committing on different nodes,
        // make sure we force a sync commit so we can respond to prepare resend requests.
        syncCommitPhase = true;
      }
    }

    Map<Address, Response> responses =
        rpcManager.invokeRemotely(recipients, command, syncCommitPhase, true);
    if (!responses.isEmpty()) {
      List<Address> resendTo = new LinkedList<Address>();
      for (Map.Entry<Address, Response> r : responses.entrySet()) {
        if (needToResendPrepare(r.getValue())) resendTo.add(r.getKey());
      }

      if (!resendTo.isEmpty()) {
        log.debugf(
            "Need to resend prepares for %s to %s", command.getGlobalTransaction(), resendTo);
        PrepareCommand pc = buildPrepareCommandForResend(ctx, command);
        rpcManager.invokeRemotely(resendTo, pc, true, true);
      }
    }
  }
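A minimal sketch of the decision encoded above, under hypothetical names: force a synchronous commit whenever the commit recipients do not include every node we prepared on, so prepare resend requests can still be answered.

import java.util.Collection;

public final class CommitSyncPolicy {
  private CommitSyncPolicy() {}

  // Equivalent to the loop over preparedOn in sendCommitCommand(): keep the configured
  // setting unless some prepared node is missing from the commit recipients.
  public static <A> boolean syncCommitPhase(boolean configuredSync,
                                            Collection<A> preparedOn,
                                            Collection<A> commitRecipients) {
    return configuredSync || !commitRecipients.containsAll(preparedOn);
  }
}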
  @Override
  public Object visitPutMapCommand(InvocationContext ctx, PutMapCommand command) throws Throwable {
    Object returnValue = invokeNextInterceptor(ctx, command);
    if (!isStoreEnabled(command) || ctx.isInTxScope()) return returnValue;

    Map<Object, Object> map = command.getMap();
    int count = 0;
    for (Object key : map.keySet()) {
      // In non-tx mode, a node may receive the same forwarded PutMapCommand many times - but
      // each time it must write only the keys locked on the primary owner that forwarded the
      // command
      if (isUsingLockDelegation
          && command.isForwarded()
          && !dm.getPrimaryLocation(key).equals(ctx.getOrigin())) continue;

      if (isProperWriter(ctx, command, key)) {
        InternalCacheEntry se = getStoredEntry(key, ctx);
        store.store(se);
        log.tracef("Stored entry %s under key %s", se, key);
        count++;
      }
    }
    if (getStatisticsEnabled()) cacheStores.getAndAdd(count);
    return returnValue;
  }
 @Override
 public Response getResponse(CacheRpcCommand command, Object returnValue) {
   if (command.getCommandId() == ClusteredGetCommand.COMMAND_ID) {
     if (returnValue == null) return null;
     ClusteredGetCommand clusteredGet = (ClusteredGetCommand) command;
     if (distributionManager.isAffectedByRehash(clusteredGet.getKey()))
       return UnsureResponse.INSTANCE;
     return SuccessfulResponse.create(returnValue);
   } else if (command instanceof SingleRpcCommand) {
     SingleRpcCommand src = (SingleRpcCommand) command;
     ReplicableCommand c = src.getCommand();
     byte commandId = c.getCommandId();
     if (c instanceof WriteCommand) {
       if (returnValue == null) return null;
       // check if this is successful.
       WriteCommand wc = (WriteCommand) c;
       return handleWriteCommand(wc, returnValue);
     } else if (commandId == MapCombineCommand.COMMAND_ID
         || commandId == ReduceCommand.COMMAND_ID
         || commandId == DistributedExecuteCommand.COMMAND_ID) {
       // Even null values should be wrapped in this case.
       return SuccessfulResponse.create(returnValue);
     } else if (c.isReturnValueExpected()) {
       if (returnValue == null) return null;
       return SuccessfulResponse.create(returnValue);
     }
   } else if (command.isReturnValueExpected()) {
     return SuccessfulResponse.create(returnValue);
   }
   return null; // no unnecessary response values!
 }
 @Override
 public List<Address> getOwners(Collection<Object> affectedKeys) {
   if (affectedKeys.isEmpty()) {
     return InfinispanCollections.emptyList();
   }
   return Immutables.immutableListConvert(dm.locateAll(affectedKeys));
 }
 /**
  * Method that skips invocation if:
  *
  * <ul>
  *   <li>the store is shared and the node storing the key is not the first owner of the key, or
  *   <li>this is an L1 put operation.
  * </ul>
  */
 @Override
 protected boolean skipKey(Object key) {
   if (loaderConfig.shared()) {
     if (!dm.getPrimaryLocation(key).equals(address)) {
       log.trace(
           "Skipping cache store since the cache loader is shared "
               + "and the caller is not the first owner of the key");
       return true;
     }
   } else {
     List<Address> addresses = dm.locate(key);
     if (isL1Put(addresses)) {
       log.trace("Skipping cache store since this is an L1 put");
       return true;
     }
   }
   return false;
 }
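A minimal sketch of the shared-store half of the rule described in the Javadoc above (the L1-put check is omitted). SharedStoreWritePolicy is a hypothetical name; only the getPrimaryLocation() call shown in these examples is assumed.

import org.infinispan.distribution.DistributionManager;
import org.infinispan.remoting.transport.Address;

public final class SharedStoreWritePolicy {
  private SharedStoreWritePolicy() {}

  // With a shared cache store only the primary owner persists the key; with a
  // non-shared store every owner writes its own copy.
  public static boolean shouldWrite(DistributionManager dm, Address self,
                                    Object key, boolean sharedStore) {
    return !sharedStore || dm.getPrimaryLocation(key).equals(self);
  }
}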
 protected <KIn> Set<KIn> filterLocalPrimaryOwner(Set<KIn> nodeLocalKeys, DistributionManager dm) {
   Set<KIn> selectedKeys = new HashSet<KIn>();
   for (KIn key : nodeLocalKeys) {
     Address primaryLocation = dm != null ? dm.getPrimaryLocation(key) : cdl.getAddress();
     if (primaryLocation != null && primaryLocation.equals(cdl.getAddress())) {
       selectedKeys.add(key);
     }
   }
   return selectedKeys;
 }
  private Object realRemoteGet(
      InvocationContext ctx, Object key, boolean storeInL1, boolean isWrite) throws Throwable {
    if (trace) log.tracef("Doing a remote get for key %s", key);

    boolean acquireRemoteLock = false;
    if (ctx.isInTxScope()) {
      TxInvocationContext txContext = (TxInvocationContext) ctx;
      acquireRemoteLock =
          isWrite && isPessimisticCache && !txContext.getAffectedKeys().contains(key);
    }
    // attempt a remote lookup
    InternalCacheEntry ice = dm.retrieveFromRemoteSource(key, ctx, acquireRemoteLock);

    if (acquireRemoteLock) {
      ((TxInvocationContext) ctx).addAffectedKey(key);
    }

    if (ice != null) {
      if (storeInL1) {
        if (isL1CacheEnabled) {
          if (trace) log.tracef("Caching remotely retrieved entry for key %s in L1", key);
          // This should be fail-safe
          try {
            long lifespan =
                ice.getLifespan() < 0
                    ? configuration.getL1Lifespan()
                    : Math.min(ice.getLifespan(), configuration.getL1Lifespan());
            PutKeyValueCommand put =
                cf.buildPutKeyValueCommand(
                    ice.getKey(), ice.getValue(), lifespan, -1, ctx.getFlags());
            lockAndWrap(ctx, key, ice);
            invokeNextInterceptor(ctx, put);
          } catch (Exception e) {
            // Couldn't store in L1 for some reason.  But don't fail the transaction!
            log.infof("Unable to store entry %s in L1 cache", key);
            log.debug("Inability to store in L1 caused by", e);
          }
        } else {
          CacheEntry ce = ctx.lookupEntry(key);
          if (ce == null || ce.isNull() || ce.isLockPlaceholder() || ce.getValue() == null) {
            if (ce != null && ce.isChanged()) {
              ce.setValue(ice.getValue());
            } else {
              if (isWrite) lockAndWrap(ctx, key, ice);
              else ctx.putLookedUpEntry(key, ice);
            }
          }
        }
      } else {
        if (trace) log.tracef("Not caching remotely retrieved entry for key %s in L1", key);
      }
      return ice.getValue();
    }
    return null;
  }
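A small sketch, with a hypothetical class name, of the L1 lifespan computation inside realRemoteGet(): an immortal remote entry (negative lifespan) is capped at the configured L1 lifespan, otherwise the smaller of the two lifespans wins.

public final class L1Lifespan {
  private L1Lifespan() {}

  // Mirrors the lifespan expression used when building the L1 PutKeyValueCommand above.
  public static long forL1(long entryLifespanMillis, long l1LifespanMillis) {
    return entryLifespanMillis < 0
        ? l1LifespanMillis
        : Math.min(entryLifespanMillis, l1LifespanMillis);
  }
}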
 @Override
 public Object visitLockControlCommand(TxInvocationContext ctx, LockControlCommand command)
     throws Throwable {
   if (ctx.isOriginLocal()) {
     int newCacheViewId = -1;
     stateTransferLock.waitForStateTransferToEnd(ctx, command, newCacheViewId);
     final Collection<Address> affectedNodes = dm.getAffectedNodes(command.getKeys());
     ((LocalTxInvocationContext) ctx).remoteLocksAcquired(affectedNodes);
     rpcManager.invokeRemotely(affectedNodes, command, true, true);
   }
   return invokeNextInterceptor(ctx, command);
 }
  @Override
  public Object visitRollbackCommand(TxInvocationContext ctx, RollbackCommand command)
      throws Throwable {
    if (shouldInvokeRemoteTxCommand(ctx)) {
      rpcManager.invokeRemotely(
          dm.getAffectedNodes(ctx.getAffectedKeys()),
          command,
          configuration.isSyncRollbackPhase(),
          true);
    }

    return invokeNextInterceptor(ctx, command);
  }
 @Override
 public <T> Map<Address, List<T>> mapKeysToNodes(
     DistributionManager dm,
     String taskId,
     Collection<T> keysToMap,
     boolean useIntermediateCompositeKey) {
   Map<Address, List<T>> addressToKey = new HashMap<Address, List<T>>();
   for (T key : keysToMap) {
     Address ownerOfKey = null;
     if (useIntermediateCompositeKey) {
       ownerOfKey = dm.getPrimaryLocation(new IntermediateCompositeKey<T>(taskId, key));
     } else {
       ownerOfKey = dm.getPrimaryLocation(key);
     }
     List<T> keysAtNode = addressToKey.get(ownerOfKey);
     if (keysAtNode == null) {
       keysAtNode = new ArrayList<T>();
       addressToKey.put(ownerOfKey, keysAtNode);
     }
     keysAtNode.add(key);
   }
   return addressToKey;
 }
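A hedged usage sketch (hypothetical class, Java 8+ assumed) of the plain-key path of mapKeysToNodes(): group keys by their primary owner and show how the work would be split across the cluster.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.remoting.transport.Address;

public final class TaskPartitionReport {
  private TaskPartitionReport() {}

  // Groups keys by the primary owner reported by the DistributionManager and prints the split.
  public static <T> void print(DistributionManager dm, Collection<T> keys) {
    Map<Address, List<T>> grouped = new HashMap<>();
    for (T key : keys) {
      grouped.computeIfAbsent(dm.getPrimaryLocation(key), a -> new ArrayList<>()).add(key);
    }
    for (Map.Entry<Address, List<T>> e : grouped.entrySet()) {
      System.out.printf("Node %s is primary owner of %d key(s)%n", e.getKey(), e.getValue().size());
    }
  }
}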
 @ManagedAttribute(
     description = "Number of replicas for each key",
     displayName = "Replication Degree")
 public double getReplicationDegree() {
   if (distributionManager != null) {
     // distributed mode
     return distributionManager.getConsistentHash().getNumOwners();
   } else if (rpcManager != null) {
     // replicated or other clustered mode
     return this.rpcManager.getTransport().getMembers().size();
   }
   // local mode
   return 1;
 }
  /**
   * This method retrieves an entry from a remote cache and optionally stores it in L1 (if L1 is
   * enabled).
   *
    * <p>This method only works if a) this is a locally originating invocation, b) the entry in
    * question is not local to the current cache instance, and c) the entry is not in L1. If any of
    * a, b or c does not hold, this method returns null and does nothing.
   *
   * @param ctx invocation context
   * @param key key to retrieve
   * @return value of a remote get, or null
   * @throws Throwable if there are problems
   */
  private Object remoteGetAndStoreInL1(InvocationContext ctx, Object key, boolean isWrite)
      throws Throwable {
    DataLocality locality = dm.getLocality(key);

    if (ctx.isOriginLocal() && !locality.isLocal() && isNotInL1(key)) {
      return realRemoteGet(ctx, key, true, isWrite);
    } else {
      // maybe we are still rehashing as a joiner? ISPN-258
      if (locality.isUncertain()) {
        if (trace)
          log.tracef(
              "Key %s is mapped to local node %s, but a rehash is in progress so may need to look elsewhere",
              key, rpcManager.getAddress());
        // try a remote lookup all the same
        return realRemoteGet(ctx, key, false, isWrite);
      } else {
        if (trace)
          log.tracef(
              "Not doing a remote get for key %s since entry is mapped to current node (%s), or is in L1.  Owners are %s",
              key, rpcManager.getAddress(), dm.locate(key));
      }
    }
    return null;
  }
Example #16
 /**
  * Encodes the cases for an asyncGet operation in which it makes sense to actually perform the
  * operation in sync.
  *
  * @param flags the flags of the current invocation (may be null)
  * @param key the key being retrieved
  * @return true if we skip the thread (performing it in sync)
  */
 private boolean asyncSkipsThread(EnumSet<Flag> flags, K key) {
   boolean isSkipLoader = isSkipLoader(flags);
   if (!isSkipLoader) {
     // if we can't skip the cacheloader, we really want a thread for async.
     return false;
   }
   CacheMode cacheMode = config.getCacheMode();
   if (!cacheMode.isDistributed()) {
     // in these cluster modes we won't RPC for a get, so no need to fork a thread.
     return true;
   } else if (flags != null
       && (flags.contains(Flag.SKIP_REMOTE_LOOKUP) || flags.contains(Flag.CACHE_MODE_LOCAL))) {
     // with these flags we won't RPC either
     return true;
   }
   // finally, we will skip the thread if the key maps to the local node
   return distributionManager.getLocality(key).isLocal();
 }
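A hedged caller-side sketch (not the Infinispan implementation) of the idea in the Javadoc above: when asyncSkipsThread() says no RPC will happen, run the get inline and hand back an already-completed future instead of paying for a thread hand-off. AsyncGetHelper is a hypothetical name.

import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AsyncGetHelper<V> {
  private final ExecutorService executor = Executors.newCachedThreadPool();

  // skipsThread would be the result of a check like asyncSkipsThread(flags, key).
  public Future<V> getAsync(Callable<V> localGet, boolean skipsThread) throws Exception {
    if (skipsThread) {
      // No remote lookup is possible, so the call is cheap: do it in the caller's thread.
      return CompletableFuture.completedFuture(localGet.call());
    }
    // The get may RPC to another node, so keep it off the caller's thread.
    return executor.submit(localGet);
  }
}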
Example #17
  <R> R performOperation(
      Function<? super S2, ? extends R> function,
      ResultsAccumulator<R> remoteResults,
      Predicate<? super R> earlyTerminatePredicate) {
    ConsistentHash ch = dm.getConsistentHash();
    TerminalOperation<R> op =
        new SingleRunOperation(
            intermediateOperations, supplierForSegments(ch, segmentsToFilter, null), function);
    Object id =
        csm.remoteStreamOperation(
            getParallelDistribution(),
            parallel,
            ch,
            segmentsToFilter,
            keysToFilter,
            Collections.emptyMap(),
            includeLoader,
            op,
            remoteResults,
            earlyTerminatePredicate);
    try {
      R localValue = op.performOperation();
      remoteResults.onCompletion(null, Collections.emptySet(), localValue);
      if (id != null) {
        try {
          if ((earlyTerminatePredicate == null || !earlyTerminatePredicate.test(localValue))
              && !csm.awaitCompletion(id, timeout, timeoutUnit)) {
            throw new TimeoutException();
          }
        } catch (InterruptedException e) {
          throw new CacheException(e);
        }
      }

      log.tracef("Finished operation for id %s", id);

      return remoteResults.currentValue;
    } finally {
      csm.forgetOperation(id);
    }
  }
  /**
    * If we are within a transaction we won't do any replication, as replication would only be
    * performed at commit time. If the operation didn't originate locally we won't do any
    * replication either.
   */
  private Object handleWriteCommand(
      InvocationContext ctx,
      WriteCommand command,
      RecipientGenerator recipientGenerator,
      boolean skipRemoteGet,
      boolean skipL1Invalidation)
      throws Throwable {
    // see if we need to load values from remote sources first
    if (ctx.isOriginLocal() && !skipRemoteGet)
      remoteGetBeforeWrite(ctx, command.isConditional(), recipientGenerator);
    boolean sync = isSynchronous(ctx);

    // if this is local mode then skip distributing
    if (isLocalModeForced(ctx)) {
      return invokeNextInterceptor(ctx, command);
    }

    // FIRST pass this call up the chain.  Only if it succeeds (no exceptions) locally do we attempt
    // to distribute.
    Object returnValue = invokeNextInterceptor(ctx, command);

    if (command.isSuccessful()) {

      if (!ctx.isInTxScope()) {
        NotifyingNotifiableFuture<Object> futureToReturn = null;
        Future<?> invalidationFuture = null;
        if (ctx.isOriginLocal()) {
          int newCacheViewId = -1;
          stateTransferLock.waitForStateTransferToEnd(ctx, command, newCacheViewId);
          List<Address> rec = recipientGenerator.generateRecipients();
          int numCallRecipients = rec == null ? 0 : rec.size();
          if (trace) log.tracef("Invoking command %s on hosts %s", command, rec);

          boolean useFuture = ctx.isUseFutureReturnType();
          if (isL1CacheEnabled && !skipL1Invalidation)
            // Handle the case where the put is local. If in unicast mode and this is not a data
            // owner, nothing happens. If in multicast mode, this node will send the multicast.
            if (rpcManager.getTransport().getMembers().size() > numCallRecipients) {
              // Command was successful, we have a number of recipients and L1 should be flushed,
              // so request any L1 invalidations from this node
              if (trace)
                log.tracef(
                    "Put occuring on node, requesting L1 cache invalidation for keys %s. Other data owners are %s",
                    command.getAffectedKeys(), dm.getAffectedNodes(command.getAffectedKeys()));
              if (useFuture) {
                futureToReturn =
                    l1Manager.flushCache(
                        recipientGenerator.getKeys(),
                        returnValue,
                        ctx.getOrigin(),
                        !(command instanceof RemoveCommand));
              } else {
                invalidationFuture =
                    l1Manager.flushCacheWithSimpleFuture(
                        recipientGenerator.getKeys(),
                        returnValue,
                        ctx.getOrigin(),
                        !(command instanceof RemoveCommand));
              }
            } else {
              if (trace)
                log.tracef("Not performing invalidation! numCallRecipients=%s", numCallRecipients);
            }
          if (!isSingleOwnerAndLocal(recipientGenerator)) {
            if (useFuture) {
              if (futureToReturn == null) futureToReturn = new NotifyingFutureImpl(returnValue);
              rpcManager.invokeRemotelyInFuture(rec, command, futureToReturn);
              return futureToReturn;
            } else {
              rpcManager.invokeRemotely(rec, command, sync);
            }
          } else if (useFuture && futureToReturn != null) {
            return futureToReturn;
          }
          if (invalidationFuture != null && sync) {
            invalidationFuture.get(); // wait for the inval command to complete
            if (trace) log.tracef("Finished invalidating keys %s ", recipientGenerator.getKeys());
          }
        } else {
          // Piggyback remote puts and cause L1 invalidations
          if (isL1CacheEnabled && !skipL1Invalidation) {
            // Command was successful and L1 should be flushed, so request any L1 invalidations from
            // this node
            if (trace)
              log.tracef(
                  "Put occuring on node, requesting cache invalidation for keys %s. Origin of command is remote",
                  command.getAffectedKeys());
            // If this is a remove command, then don't pass in the origin - since the entry would be
            // removed from the origin's L1 cache.
            invalidationFuture =
                l1Manager.flushCacheWithSimpleFuture(
                    recipientGenerator.getKeys(),
                    returnValue,
                    ctx.getOrigin(),
                    !(command instanceof RemoveCommand));
            if (sync) {
              invalidationFuture.get(); // wait for the inval command to complete
              if (trace) log.tracef("Finished invalidating keys %s ", recipientGenerator.getKeys());
            }
          }
        }
      }
    }
    return returnValue;
  }
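A minimal sketch (hypothetical name) of the L1 invalidation trigger used in the local-origin branch above: invalidations are only requested when the cluster has members beyond the write recipients, because only those extra members could be holding stale L1 entries.

public final class L1InvalidationPolicy {
  private L1InvalidationPolicy() {}

  // Mirrors the isL1CacheEnabled / skipL1Invalidation / member-count check in handleWriteCommand().
  public static boolean shouldRequestL1Invalidation(boolean l1Enabled,
                                                    boolean skipL1Invalidation,
                                                    int clusterSize,
                                                    int numRecipients) {
    return l1Enabled && !skipL1Invalidation && clusterSize > numRecipients;
  }
}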
 @Override
 public boolean localNodeIsOwner(Object key) {
   return dm.getLocality(key).isLocal();
 }
Example #20
  <R> R performOperationRehashAware(
      Function<? super S2, ? extends R> function,
      boolean retryOnRehash,
      ResultsAccumulator<R> remoteResults,
      Predicate<? super R> earlyTerminatePredicate) {
    Set<Integer> segmentsToProcess = segmentsToFilter;
    TerminalOperation<R> op;
    do {
      ConsistentHash ch = dm.getReadConsistentHash();
      if (retryOnRehash) {
        op =
            new SegmentRetryingOperation(
                intermediateOperations, supplierForSegments(ch, segmentsToProcess, null), function);
      } else {
        op =
            new SingleRunOperation(
                intermediateOperations, supplierForSegments(ch, segmentsToProcess, null), function);
      }
      Object id =
          csm.remoteStreamOperationRehashAware(
              getParallelDistribution(),
              parallel,
              ch,
              segmentsToProcess,
              keysToFilter,
              Collections.emptyMap(),
              includeLoader,
              op,
              remoteResults,
              earlyTerminatePredicate);
      try {
        R localValue;
        boolean localRun = ch.getMembers().contains(localAddress);
        if (localRun) {
          localValue = op.performOperation();
          // TODO: we can do this more efficiently - since we drop all results locally
          if (dm.getReadConsistentHash().equals(ch)) {
            Set<Integer> ourSegments = ch.getPrimarySegmentsForOwner(localAddress);
            if (segmentsToProcess != null) {
              ourSegments.retainAll(segmentsToProcess);
            }
            remoteResults.onCompletion(null, ourSegments, localValue);
          } else {
            if (segmentsToProcess != null) {
              Set<Integer> ourSegments = ch.getPrimarySegmentsForOwner(localAddress);
              ourSegments.retainAll(segmentsToProcess);
              remoteResults.onSegmentsLost(ourSegments);
            } else {
              remoteResults.onSegmentsLost(ch.getPrimarySegmentsForOwner(localAddress));
            }
          }
        } else {
          // This isn't actually used because localRun short circuits first
          localValue = null;
        }
        if (id != null) {
          try {
            if ((!localRun
                    || earlyTerminatePredicate == null
                    || !earlyTerminatePredicate.test(localValue))
                && !csm.awaitCompletion(id, timeout, timeoutUnit)) {
              throw new TimeoutException();
            }
          } catch (InterruptedException e) {
            throw new CacheException(e);
          }
        }

        if (!remoteResults.lostSegments.isEmpty()) {
          segmentsToProcess = new HashSet<>(remoteResults.lostSegments);
          remoteResults.lostSegments.clear();
          log.tracef("Found %s lost segments for identifier %s", segmentsToProcess, id);
        } else {
          // If we didn't lose any segments we don't need to process anymore
          if (segmentsToProcess != null) {
            segmentsToProcess = null;
          }
          log.tracef("Finished rehash aware operation for id %s", id);
        }
      } finally {
        csm.forgetOperation(id);
      }
    } while (segmentsToProcess != null && !segmentsToProcess.isEmpty());

    return remoteResults.currentValue;
  }
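A stripped-down sketch of the rehash-aware retry pattern used by performOperationRehashAware(), without the Infinispan types; SegmentRetryLoop and SegmentResult are hypothetical. The operation is simply re-run over whichever segments were reported lost until none remain.

import java.util.HashSet;
import java.util.Set;
import java.util.function.Function;

public final class SegmentRetryLoop {
  private SegmentRetryLoop() {}

  public static final class SegmentResult<R> {
    final R value;
    final Set<Integer> lostSegments;

    public SegmentResult(R value, Set<Integer> lostSegments) {
      this.value = value;
      this.lostSegments = lostSegments;
    }
  }

  // Re-runs the operation over the segments lost to a rehash until the result
  // covers every requested segment.
  public static <R> R run(Set<Integer> initialSegments,
                          Function<Set<Integer>, SegmentResult<R>> operation) {
    Set<Integer> remaining = new HashSet<>(initialSegments);
    R last = null;
    while (!remaining.isEmpty()) {
      SegmentResult<R> result = operation.apply(remaining);
      last = result.value;
      remaining = new HashSet<>(result.lostSegments);
    }
    return last;
  }
}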
 @Override
 public Address getPrimaryOwner(Object key) {
   return dm.getPrimaryLocation(key);
 }
 @Override
 public boolean localNodeIsPrimaryOwner(Object key) {
   final Address address = rpcManager.getAddress();
   return dm.getPrimaryLocation(key).equals(address);
 }
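A hedged usage sketch building on getPrimaryOwner()/localNodeIsPrimaryOwner() above: run a task only when this node is the primary owner of a key. PrimaryOwnerGuard is a hypothetical name; only the DistributionManager and RpcManager calls shown in these examples are assumed.

import org.infinispan.Cache;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.remoting.transport.Address;

public final class PrimaryOwnerGuard {
  private PrimaryOwnerGuard() {}

  // Returns true when the task ran, i.e. when this node is the primary owner of the key
  // (or the cache is not distributed at all).
  public static boolean runIfPrimary(Cache<?, ?> cache, Object key, Runnable task) {
    DistributionManager dm = cache.getAdvancedCache().getDistributionManager();
    if (dm == null) {
      task.run();
      return true;
    }
    Address self = cache.getAdvancedCache().getRpcManager().getAddress();
    if (dm.getPrimaryLocation(key).equals(self)) {
      task.run();
      return true;
    }
    return false;
  }
}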
Example #23
  void performRehashKeyTrackingOperation(
      Function<
              Supplier<Stream<CacheEntry>>,
              KeyTrackingTerminalOperation<Object, ? extends T, Object>>
          function) {
    final AtomicBoolean complete = new AtomicBoolean();

    ConsistentHash segmentInfoCH = dm.getReadConsistentHash();
    KeyTrackingConsumer<Object, Object> results =
        new KeyTrackingConsumer<>(segmentInfoCH, (c) -> {}, c -> c, null, keyEquivalence);
    Set<Integer> segmentsToProcess =
        segmentsToFilter == null
            ? new ReplicatedConsistentHash.RangeSet(segmentInfoCH.getNumSegments())
            : segmentsToFilter;
    do {
      ConsistentHash ch = dm.getReadConsistentHash();
      boolean localRun = ch.getMembers().contains(localAddress);
      Set<Integer> segments;
      Set<Object> excludedKeys;
      if (localRun) {
        segments = ch.getPrimarySegmentsForOwner(localAddress);
        segments.retainAll(segmentsToProcess);

        excludedKeys =
            segments
                .stream()
                .flatMap(s -> results.referenceArray.get(s).stream())
                .collect(Collectors.toSet());
      } else {
        // This null is okay as it is only referenced if it was a localRun
        segments = null;
        excludedKeys = Collections.emptySet();
      }
      KeyTrackingTerminalOperation<Object, ? extends T, Object> op =
          function.apply(supplierForSegments(ch, segmentsToProcess, excludedKeys));
      op.handleInjection(registry);
      Object id =
          csm.remoteStreamOperationRehashAware(
              getParallelDistribution(),
              parallel,
              ch,
              segmentsToProcess,
              keysToFilter,
              new AtomicReferenceArrayToMap<>(results.referenceArray),
              includeLoader,
              op,
              results);
      try {
        if (localRun) {
          Collection<CacheEntry<Object, Object>> localValue =
              op.performOperationRehashAware(results);
          // TODO: we can do this more efficiently - this hampers performance during rehash
          if (dm.getReadConsistentHash().equals(ch)) {
            log.tracef("Found local values %s for id %s", localValue.size(), id);
            results.onCompletion(null, segments, localValue);
          } else {
            Set<Integer> ourSegments = ch.getPrimarySegmentsForOwner(localAddress);
            ourSegments.retainAll(segmentsToProcess);
            log.tracef(
                "CH changed - making %s segments suspect for identifier %s", ourSegments, id);
            results.onSegmentsLost(ourSegments);
            // We keep track of those keys so we don't fire them again
            results.onIntermediateResult(null, localValue);
          }
        }
        if (id != null) {
          try {
            if (!csm.awaitCompletion(id, timeout, timeoutUnit)) {
              throw new TimeoutException();
            }
          } catch (InterruptedException e) {
            throw new CacheException(e);
          }
        }
        if (!results.lostSegments.isEmpty()) {
          segmentsToProcess = new HashSet<>(results.lostSegments);
          results.lostSegments.clear();
          log.tracef("Found %s lost segments for identifier %s", segmentsToProcess, id);
        } else {
          log.tracef("Finished rehash aware operation for id %s", id);
          complete.set(true);
        }
      } finally {
        csm.forgetOperation(id);
      }
    } while (!complete.get());
  }
Example #24
 private static boolean isOwner(Cache<?, ?> cache, Object key) {
   DistributionManager dm = cache.getAdvancedCache().getDistributionManager();
   return dm == null || dm.locate(key).contains(addressOf(cache));
 }
 private boolean getMightGoRemote(InvocationContext ctx, Object key) {
   return ctx.isOriginLocal()
       && configuration.getCacheMode().isDistributed()
       && !ctx.hasFlag(Flag.SKIP_REMOTE_LOOKUP)
       && !distManager.getLocality(key).isLocal();
 }
 private boolean isRemote(Object key) {
   return distributionManager != null && !distributionManager.getLocality(key).isLocal();
 }
 @Override
 public List<Address> getOwners(Object key) {
   return Immutables.immutableListConvert(dm.locate(key));
 }
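A closing usage sketch (hypothetical class) that ties the lookups above together: list all owners of a key and its primary owner, using only the locate() and getPrimaryLocation() calls from these examples.

import java.util.List;
import org.infinispan.Cache;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.remoting.transport.Address;

public final class OwnershipReport {
  private OwnershipReport() {}

  public static void print(Cache<?, ?> cache, Object key) {
    DistributionManager dm = cache.getAdvancedCache().getDistributionManager();
    if (dm == null) {
      System.out.println("Cache is not distributed; key " + key + " is stored locally");
      return;
    }
    List<Address> owners = dm.locate(key);
    System.out.println("Owners of " + key + ": " + owners);
    System.out.println("Primary owner: " + dm.getPrimaryLocation(key));
  }
}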