Example no. 1
 /**
  * Receives notification of an updated key right before it is committed in the DataContainer.
  *
  * @param key the key that is being modified
  */
 @Override
 public void addUpdatedKey(Object key) {
   // grab a copy of the reference to prevent issues if another thread calls stopApplyingState()
   // between null check and actual usage
   final Set<Object> localUpdatedKeys = updatedKeys;
   if (localUpdatedKeys != null) {
     if (cacheTopology.getWriteConsistentHash().isKeyLocalToNode(rpcManager.getAddress(), key)) {
       localUpdatedKeys.add(key);
     }
   }
 }
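
The pattern in addUpdatedKey, copying the field into a local variable before the null check, is what protects against another thread nulling out updatedKeys via stopApplyingState() in between. A minimal, self-contained sketch of that idiom, using hypothetical class and field names rather than the original Infinispan types:

  import java.util.Set;
  import java.util.concurrent.ConcurrentHashMap;

  class UpdatedKeyTracker {
    // cleared by another thread when state application stops
    private volatile Set<Object> updatedKeys = ConcurrentHashMap.newKeySet();

    void stopApplyingState() {
      updatedKeys = null;
    }

    void addUpdatedKey(Object key) {
      // read the volatile field exactly once; the local copy cannot become null afterwards
      final Set<Object> localUpdatedKeys = updatedKeys;
      if (localUpdatedKeys != null) {
        localUpdatedKeys.add(key);
      }
    }
  }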
Example no. 2
 @Override
 public void notifyTopologyChanged(
     CacheTopology oldTopology, CacheTopology newTopology, int newTopologyId, boolean pre) {
   if (!topologyChangedListeners.isEmpty()) {
     EventImpl<K, V> e = EventImpl.createEvent(cache, TOPOLOGY_CHANGED);
     e.setPre(pre);
     if (oldTopology != null) {
       e.setConsistentHashAtStart(oldTopology.getReadConsistentHash());
     }
     e.setConsistentHashAtEnd(newTopology.getWriteConsistentHash());
     e.setNewTopologyId(newTopologyId);
      for (CacheEntryListenerInvocation<K, V> listener : topologyChangedListeners) {
        listener.invoke(e);
      }
   }
 }
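
notifyTopologyChanged builds a single event, flags it as pre or post, and hands it to every registered listener, skipping event construction entirely when no listener is registered. A self-contained sketch of the same dispatch pattern, with hypothetical TopologyEvent and TopologyNotifier types standing in for Infinispan's EventImpl and listener invocations:

  import java.util.List;
  import java.util.concurrent.CopyOnWriteArrayList;
  import java.util.function.Consumer;

  class TopologyEvent {
    boolean pre;
    int newTopologyId;
  }

  class TopologyNotifier {
    private final List<Consumer<TopologyEvent>> listeners = new CopyOnWriteArrayList<>();

    void addListener(Consumer<TopologyEvent> listener) {
      listeners.add(listener);
    }

    void notifyTopologyChanged(int newTopologyId, boolean pre) {
      if (listeners.isEmpty()) {
        return; // avoid building the event when nobody is listening, as in the snippet above
      }
      TopologyEvent e = new TopologyEvent();
      e.pre = pre;
      e.newTopologyId = newTopologyId;
      for (Consumer<TopologyEvent> listener : listeners) {
        listener.accept(e);
      }
    }
  }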
Example no. 3
  public void applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
    if (trace) {
      log.tracef(
          "Before applying the received state the data container of cache %s has %d keys",
          cacheName, dataContainer.size());
    }

    for (StateChunk stateChunk : stateChunks) {
      // it's possible to receive a late message so we must be prepared to ignore segments we no longer own
      // todo [anistor] this check should be based on topologyId
      if (!cacheTopology
          .getWriteConsistentHash()
          .getSegmentsForOwner(rpcManager.getAddress())
          .contains(stateChunk.getSegmentId())) {
        log.warnf(
            "Discarding received cache entries for segment %d of cache %s because they do not belong to this node.",
            stateChunk.getSegmentId(), cacheName);
        continue;
      }

      // notify the inbound task that a chunk of cache entries was received
      InboundTransferTask inboundTransfer;
      synchronized (this) {
        inboundTransfer = transfersBySegment.get(stateChunk.getSegmentId());
      }
      if (inboundTransfer != null) {
        if (stateChunk.getCacheEntries() != null) {
          doApplyState(sender, stateChunk.getSegmentId(), stateChunk.getCacheEntries());
        }

        inboundTransfer.onStateReceived(stateChunk.getSegmentId(), stateChunk.isLastChunk());
      } else {
        log.warnf(
            "Received unsolicited state from node %s for segment %d of cache %s",
            sender, stateChunk.getSegmentId(), cacheName);
      }
    }

    if (trace) {
      log.tracef(
          "After applying the received state the data container of cache %s has %d keys",
          cacheName, dataContainer.size());
      synchronized (this) {
        log.tracef("Segments not received yet for cache %s: %s", cacheName, transfersBySource);
      }
    }
  }
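
Each incoming chunk above is first checked against the segments this node owns in the write consistent hash; late chunks for segments that have since moved away are dropped with a warning, and only chunks backed by a known inbound transfer are applied. A simplified, hypothetical sketch of that ownership filter, with a Chunk record standing in for StateChunk:

  import java.util.List;
  import java.util.Set;

  record Chunk(int segmentId, List<Object> entries, boolean lastChunk) {}

  class ChunkFilter {
    // apply only the chunks whose segment is still owned by this node; skip the rest
    static void applyOwnedChunks(Set<Integer> ownedSegments, List<Chunk> chunks) {
      for (Chunk chunk : chunks) {
        if (!ownedSegments.contains(chunk.segmentId())) {
          // e.g. a late message delivered after a rebalance moved the segment elsewhere
          continue;
        }
        // ...apply chunk.entries() to the data container and notify the inbound transfer...
      }
    }
  }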
Example no. 4
  @Override
  public void onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
    if (trace) {
      log.tracef(
          "Received new CH %s for cache %s", cacheTopology.getWriteConsistentHash(), cacheName);
    }

    int numStartedTopologyUpdates = activeTopologyUpdates.incrementAndGet();
    if (isRebalance) {
      rebalanceInProgress.set(true);
    }
    final ConsistentHash previousCh =
        this.cacheTopology != null ? this.cacheTopology.getWriteConsistentHash() : null;
    // Ensures writes to the data container use the right consistent hash
    // No need for a try/finally block, since it's just an assignment
    stateTransferLock.acquireExclusiveTopologyLock();
    this.cacheTopology = cacheTopology;
    if (numStartedTopologyUpdates == 1) {
      updatedKeys = new ConcurrentHashSet<Object>();
    }
    stateTransferLock.releaseExclusiveTopologyLock();
    stateTransferLock.notifyTopologyInstalled(cacheTopology.getTopologyId());

    try {
      // fetch transactions and data segments from other owners if this is enabled
      if (isTransactional || isFetchEnabled) {
        Set<Integer> addedSegments;
        if (previousCh == null) {
          // we start fresh, without any data, so we need to pull everything we own according to
          // writeCh

          addedSegments = getOwnedSegments(cacheTopology.getWriteConsistentHash());

          if (trace) {
            log.tracef("On cache %s we have: added segments: %s", cacheName, addedSegments);
          }
        } else {
          Set<Integer> previousSegments = getOwnedSegments(previousCh);
          Set<Integer> newSegments = getOwnedSegments(cacheTopology.getWriteConsistentHash());

          Set<Integer> removedSegments = new HashSet<Integer>(previousSegments);
          removedSegments.removeAll(newSegments);

          // This is a rebalance, we need to request the segments we own in the new CH.
          addedSegments = new HashSet<Integer>(newSegments);
          addedSegments.removeAll(previousSegments);

          if (trace) {
            log.tracef(
                "On cache %s we have: removed segments: %s; new segments: %s; old segments: %s; added segments: %s",
                cacheName, removedSegments, newSegments, previousSegments, addedSegments);
          }

          // remove inbound transfers and any data for segments we no longer own
          cancelTransfers(removedSegments);

          // If L1.onRehash is enabled, "removed" segments are actually moved to L1. The new (and old)
          // owners will automatically add the nodes that no longer own a key to that key's requestors list.
          invalidateSegments(newSegments, removedSegments);

          // check if any of the existing transfers should be restarted from a different source
          // because the initial source is no longer a member
          Set<Address> members =
              new HashSet<Address>(cacheTopology.getReadConsistentHash().getMembers());
          synchronized (this) {
            for (Iterator<Address> it = transfersBySource.keySet().iterator(); it.hasNext(); ) {
              Address source = it.next();
              if (!members.contains(source)) {
                if (trace) {
                  log.tracef(
                      "Removing inbound transfers from source %s for cache %s", source, cacheName);
                }
                List<InboundTransferTask> inboundTransfers = transfersBySource.get(source);
                it.remove();
                for (InboundTransferTask inboundTransfer : inboundTransfers) {
                  // these segments will be restarted if they are still in new write CH
                  if (trace) {
                    log.tracef(
                        "Removing inbound transfers for segments %s from source %s for cache %s",
                        inboundTransfer.getSegments(), source, cacheName);
                  }
                  transfersBySegment.keySet().removeAll(inboundTransfer.getSegments());
                  addedSegments.addAll(inboundTransfer.getUnfinishedSegments());
                }
              }
            }

            // exclude those that are already in progress from a valid source
            addedSegments.removeAll(transfersBySegment.keySet());
          }
        }

        if (!addedSegments.isEmpty()) {
          addTransfers(addedSegments); // add transfers for new or restarted segments
        }
      }
    } finally {
      stateTransferLock.notifyTransactionDataReceived(cacheTopology.getTopologyId());

      if (activeTopologyUpdates.decrementAndGet() == 0) {
        notifyEndOfTopologyUpdate(cacheTopology.getTopologyId());
      }
    }
  }
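
When a previous consistent hash exists, the removed and added segment sets computed above are plain set differences between the old and new ownership: removed = previous minus new, added = new minus previous. A self-contained sketch of that computation, with hypothetical helper names:

  import java.util.HashSet;
  import java.util.Set;

  class SegmentDiff {
    // segments owned before but not after the topology change
    static Set<Integer> removedSegments(Set<Integer> previousSegments, Set<Integer> newSegments) {
      Set<Integer> removed = new HashSet<>(previousSegments);
      removed.removeAll(newSegments);
      return removed;
    }

    // segments owned after but not before the topology change; these need new inbound transfers
    static Set<Integer> addedSegments(Set<Integer> previousSegments, Set<Integer> newSegments) {
      Set<Integer> added = new HashSet<>(newSegments);
      added.removeAll(previousSegments);
      return added;
    }
  }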