Example #1
  public void removeChannel(Channel channel) {
    lock.lock();
    try {
      if (channel.getDirection() == Direction.OUT) {
        BrokerHost registeredHost = null;
        for (Map.Entry<BrokerHost, List<Channel>> e : brokerHostToProducerChannelMap.entrySet()) {
          List<Channel> channels = e.getValue();
          Iterator<Channel> channelIterator = channels.iterator();
          while (channelIterator.hasNext()) {
            Channel c = channelIterator.next();
            if (c.equals(channel)) {
              registeredHost = e.getKey();
              channelIterator.remove();

              // if there are no more channels remove the producer
              if (channels.size() == 0) {
                Manageable producer = producers.remove(registeredHost);
                producer.stop();
              }
              break;
            }
          }
          if (registeredHost != null) {
            break;
          }
        }
      } else if (channel.getDirection() == Direction.IN) {
        BrokerHost registeredHost = null;
        for (Map.Entry<BrokerHost, List<Channel>> e : brokerHostToConsumerChannelMap.entrySet()) {
          List<Channel> channels = e.getValue();
          Iterator<Channel> channelIterator = channels.iterator();
          while (channelIterator.hasNext()) {
            Channel c = channelIterator.next();
            if (c.equals(channel)) {
              registeredHost = e.getKey();
              channelIterator.remove();

              // if there are no more channels remove the consumer and its worker
              if (channels.size() == 0) {
                ConsumingWorker worker = consumingWorkers.remove(registeredHost);
                worker.stop();

                Manageable consumer = consumers.remove(registeredHost);
                consumer.stop();
              }
              break;
            }
          }
          if (registeredHost != null) {
            break;
          }
        }
      }
    } finally {
      lock.unlock();
    }
  }
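The pattern above generalizes: removing through the Iterator (rather than List.remove) avoids a ConcurrentModificationException mid-traversal, and the emptiness check right after removal is the hook for tearing down the per-host resource. A minimal, self-contained sketch of the same idea (all names hypothetical):

import java.util.*;

public class ChannelRegistrySketch {
  public static void main(String[] args) {
    Map<String, List<String>> registry = new HashMap<>();
    registry.computeIfAbsent("host-a", k -> new ArrayList<>()).add("ch-1");

    for (Map.Entry<String, List<String>> e : registry.entrySet()) {
      List<String> channels = e.getValue();
      Iterator<String> it = channels.iterator();
      while (it.hasNext()) {
        if ("ch-1".equals(it.next())) {
          it.remove(); // safe structural removal while iterating
          if (channels.isEmpty()) {
            // last channel for this host: stop/release the per-host resource here
            System.out.println("stopping resource for " + e.getKey());
          }
          break;
        }
      }
    }
  }
}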
  /**
   * Tests queue iterator traversal and removal semantics.
   *
   * @throws Exception If failed.
   */
  public void testIterator() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    for (int i = 0; i < 100; i++) assert queue.add(Integer.toString(i));

    Iterator<String> iter1 = queue.iterator();

    int cnt = 0;
    for (int i = 0; i < 100; i++) {
      assertNotNull(iter1.next());
      cnt++;
    }

    assertEquals(100, queue.size());
    assertEquals(100, cnt);

    assertNotNull(queue.take());
    assertNotNull(queue.take());
    assertTrue(queue.remove("33"));
    assertTrue(queue.remove("77"));

    assertEquals(96, queue.size());

    Iterator<String> iter2 = queue.iterator();

    try {
      iter2.remove();

      fail("Expected IllegalStateException was not thrown.");
    } catch (IllegalStateException e) {
      info("Caught expected exception: " + e);
    }

    iter2.next();
    iter2.remove();

    cnt = 0;
    while (iter2.hasNext()) {
      assertNotNull(iter2.next());
      cnt++;
    }

    assertEquals(95, cnt);
    assertEquals(95, queue.size());

    iter2.remove();
  }
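Two details of the Iterator contract drive this test: remove() may only follow a successful next(), otherwise IllegalStateException is thrown, and each next() re-arms remove() for exactly one call. A minimal sketch against a plain ArrayList (class name hypothetical):

import java.util.*;

public class IteratorContractSketch {
  public static void main(String[] args) {
    List<String> list = new ArrayList<>(Arrays.asList("a", "b", "c"));
    Iterator<String> it = list.iterator();

    try {
      it.remove(); // no preceding next(): the contract mandates IllegalStateException
    } catch (IllegalStateException expected) {
      System.out.println("Caught expected exception: " + expected);
    }

    it.next();
    it.remove();              // legal: removes "a"
    System.out.println(list); // [b, c]
  }
}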
  /** Return all of the neighbors with whom we share the provided range. */
  static Set<InetAddress> getNeighbors(String table, Range<Token> toRepair) {
    StorageService ss = StorageService.instance;
    Map<Range<Token>, List<InetAddress>> replicaSets = ss.getRangeToAddressMap(table);
    Range<Token> rangeSuperSet = null;
    for (Range<Token> range : ss.getLocalRanges(table)) {
      if (range.contains(toRepair)) {
        rangeSuperSet = range;
        break;
      } else if (range.intersects(toRepair)) {
        throw new IllegalArgumentException(
            "Requested range intersects a local range but is not fully contained in one; this would lead to imprecise repair");
      }
    }
    if (rangeSuperSet == null || !replicaSets.containsKey(toRepair)) return Collections.emptySet();

    Set<InetAddress> neighbors = new HashSet<InetAddress>(replicaSets.get(rangeSuperSet));
    neighbors.remove(FBUtilities.getBroadcastAddress());
    // Excluding all nodes with version <= 0.7 since they don't know how to
    // create a correct merkle tree (they build it over the full range)
    Iterator<InetAddress> iter = neighbors.iterator();
    while (iter.hasNext()) {
      InetAddress endpoint = iter.next();
      if (Gossiper.instance.getVersion(endpoint) <= MessagingService.VERSION_07) {
        logger.info(
            "Excluding "
                + endpoint
                + " from repair because it is on version 0.7 or sooner. You should consider updating this node before running repair again.");
        iter.remove();
      }
    }
    return neighbors;
  }
Example #4
 protected void handleIterator(String[] args) {
   Iterator it = null;
   String iteratorStr = args[0];
   if (iteratorStr.startsWith("s.")) {
     it = getSet().iterator();
   } else if (iteratorStr.startsWith("m.")) {
     it = getMap().keySet().iterator();
   } else if (iteratorStr.startsWith("q.")) {
     it = getQueue().iterator();
   } else if (iteratorStr.startsWith("l.")) {
     it = getList().iterator();
   }
   if (it != null) {
     boolean remove = false;
     if (args.length > 1) {
       String removeStr = args[1];
       remove = removeStr.equals("remove");
     }
     int count = 1;
     while (it.hasNext()) {
       print(count++ + " " + it.next());
       if (remove) {
         it.remove();
         print(" removed");
       }
       println("");
     }
   }
 }
  /**
   * Determines whether {@code effect} has been paused/stopped by the remote peer. The determination
   * is based on counting frames and is triggered by the receipt of (a piece of) a new (video) frame
   * by {@code cause}.
   *
   * @param cause the {@code SimulcastLayer} which has received (a piece of) a new (video) frame and
   *     has thus triggered a check on {@code effect}
   * @param pkt the {@code RawPacket} which was received by {@code cause} and possibly influenced
   *     the decision to trigger a check on {@code effect}
   * @param effect the {@code SimulcastLayer} which is to be checked whether it looks like it has
   *     been paused/stopped by the remote peer
   * @param endIndexInSimulcastLayerFrameHistory the index into {@code
   *     simulcastLayerFrameHistory} at which the search for {@code effect} stops (exclusive)
   */
  private void maybeTimeout(
      SimulcastLayer cause,
      RawPacket pkt,
      SimulcastLayer effect,
      int endIndexInSimulcastLayerFrameHistory) {
    Iterator<SimulcastLayer> it = simulcastLayerFrameHistory.iterator();
    boolean timeout = true;

    for (int ix = 0; it.hasNext() && ix < endIndexInSimulcastLayerFrameHistory; ++ix) {
      if (it.next() == effect) {
        timeout = false;
        break;
      }
    }
    if (timeout) {
      effect.maybeTimeout(pkt);

      if (!effect.isStreaming()) {
        // Since effect has been determined to have been paused/stopped
        // by the remote peer, its possible presence in
        // simulcastLayerFrameHistory is irrelevant now. In other words,
        // remove effect from simulcastLayerFrameHistory.
        while (it.hasNext()) {
          if (it.next() == effect) it.remove();
        }
      }
    }
  }
 public void testDescendingSubMapContents2() {
   ConcurrentNavigableMap map = dmap5();
   SortedMap sm = map.subMap(m2, m3);
   assertEquals(1, sm.size());
   assertEquals(m2, sm.firstKey());
   assertEquals(m2, sm.lastKey());
   assertFalse(sm.containsKey(m1));
   assertTrue(sm.containsKey(m2));
   assertFalse(sm.containsKey(m3));
   assertFalse(sm.containsKey(m4));
   assertFalse(sm.containsKey(m5));
   Iterator i = sm.keySet().iterator();
   Object k;
   k = (Integer) (i.next());
   assertEquals(m2, k);
   assertFalse(i.hasNext());
   Iterator j = sm.keySet().iterator();
   j.next();
   j.remove();
   assertFalse(map.containsKey(m2));
   assertEquals(4, map.size());
   assertEquals(0, sm.size());
   assertTrue(sm.isEmpty());
   assertSame(sm.remove(m3), null);
   assertEquals(4, map.size());
 }
 public void testSubMapContents2() {
   ConcurrentNavigableMap map = map5();
   SortedMap sm = map.subMap(two, three);
   assertEquals(1, sm.size());
   assertEquals(two, sm.firstKey());
   assertEquals(two, sm.lastKey());
   assertFalse(sm.containsKey(one));
   assertTrue(sm.containsKey(two));
   assertFalse(sm.containsKey(three));
   assertFalse(sm.containsKey(four));
   assertFalse(sm.containsKey(five));
   Iterator i = sm.keySet().iterator();
   Object k;
   k = (Integer) (i.next());
   assertEquals(two, k);
   assertFalse(i.hasNext());
   Iterator j = sm.keySet().iterator();
   j.next();
   j.remove();
   assertFalse(map.containsKey(two));
   assertEquals(4, map.size());
   assertEquals(0, sm.size());
   assertTrue(sm.isEmpty());
   assertSame(sm.remove(three), null);
   assertEquals(4, map.size());
 }
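Both tests rely on subMap() returning a live view: a removal made through the view's key-set iterator writes through to the backing map, and the view's size shrinks accordingly. A minimal sketch with a ConcurrentSkipListMap (class name and contents hypothetical):

import java.util.*;
import java.util.concurrent.*;

public class SubMapViewSketch {
  public static void main(String[] args) {
    ConcurrentNavigableMap<Integer, String> map = new ConcurrentSkipListMap<>();
    for (int i = 1; i <= 5; i++) map.put(i, "v" + i);

    SortedMap<Integer, String> sm = map.subMap(2, 3); // half-open range: only key 2
    Iterator<Integer> it = sm.keySet().iterator();
    it.next();
    it.remove(); // writes through to the backing map

    System.out.println(map.containsKey(2)); // false
    System.out.println(map.size());         // 4
    System.out.println(sm.isEmpty());       // true
  }
}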
  /**
   * Waits for renting partitions.
   *
   * @return {@code True} if mapping was changed.
   * @throws IgniteCheckedException If failed.
   */
  private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
      GridDhtLocalPartition p = it.next();

      GridDhtPartitionState state = p.state();

      if (state == RENTING || state == EVICTED) {
        if (log.isDebugEnabled()) log.debug("Waiting for renting partition: " + p);

        // Wait for partition to empty out.
        p.rent(true).get();

        if (log.isDebugEnabled()) log.debug("Finished waiting for renting partition: " + p);

        // Remove evicted partition.
        it.remove();

        changed = true;
      }
    }

    return changed;
  }
Example #9
  /** Selects on sockets and informs their Communicator when there is something to do. */
  public void communicate(int timeout) {

    try {
      selector.select(timeout);
    } catch (IOException e) {
      // Not really sure why/when this happens yet
      return;
    }

    Iterator<SelectionKey> keys = selector.selectedKeys().iterator();

    while (keys.hasNext()) {
      SelectionKey key = keys.next();
      keys.remove();
      if (!key.isValid()) continue; // the key may have been cancelled since it was selected
      Communicator communicator = (Communicator) key.attachment();

      if (key.isReadable()) communicator.onReadable();
      if (key.isWritable()) communicator.onWritable();
      if (key.isAcceptable()) communicator.onAcceptable();
    }

    // Go through the queue and handle each communicator
    while (!queue.isEmpty()) {
      Communicator c = queue.poll();
      c.onMemo();
    }
  }
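The keys.remove() call above is essential: a Selector never clears its selected-key set, so a key that is not removed by the application stays in the set and would be processed again after the next select(). A minimal, runnable skeleton of the same loop (nothing is registered here, so the set is simply empty):

import java.io.IOException;
import java.nio.channels.*;
import java.util.Iterator;

public class SelectLoopSketch {
  public static void main(String[] args) throws IOException {
    Selector selector = Selector.open();
    // channels would be registered via channel.register(selector, ops, attachment)

    selector.select(500);

    Iterator<SelectionKey> keys = selector.selectedKeys().iterator();
    while (keys.hasNext()) {
      SelectionKey key = keys.next();
      keys.remove(); // mandatory: the Selector never clears this set itself
      if (!key.isValid()) continue; // the key may have been cancelled meanwhile
      // dispatch on key.isReadable() / key.isWritable() / key.isAcceptable()
    }

    selector.close();
  }
}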
Example #10
  /**
   * Removes the records telling which entity caps node a contact has.
   *
   * @param contact the contact
   */
  public void removeContactCapsNode(Contact contact) {
    Caps caps = null;
    String lastRemovedJid = null;

    Iterator<String> iter = userCaps.keySet().iterator();
    while (iter.hasNext()) {
      String jid = iter.next();

      if (StringUtils.parseBareAddress(jid).equals(contact.getAddress())) {
        caps = userCaps.get(jid);
        lastRemovedJid = jid;
        iter.remove();
      }
    }

    // Fire the event only for the last removed JID: at the protocol level
    // there will be a single event for the contact.
    if (caps != null) {
      UserCapsNodeListener[] listeners;
      synchronized (userCapsNodeListeners) {
        listeners = userCapsNodeListeners.toArray(NO_USER_CAPS_NODE_LISTENERS);
      }
      if (listeners.length != 0) {
        String nodeVer = caps.getNodeVer();

        for (UserCapsNodeListener listener : listeners)
          listener.userCapsNodeRemoved(lastRemovedJid, nodeVer, false);
      }
    }
  }
Example #11
  protected void handleView(View view) {
    this.view = view;
    if (log.isDebugEnabled()) log.debug("view=" + view);
    List<Address> members = view.getMembers();

    _consumerLock.lock();
    try {
      // This removes the consumers that were registered that are now gone
      Iterator<Owner> iterator = _consumersAvailable.iterator();
      while (iterator.hasNext()) {
        Owner owner = iterator.next();
        if (!members.contains(owner.getAddress())) {
          iterator.remove();
          sendRemoveConsumerRequest(owner);
        }
      }

      // This removes the run requests whose requestors are now gone
      iterator = _runRequests.iterator();
      while (iterator.hasNext()) {
        Owner owner = iterator.next();
        if (!members.contains(owner.getAddress())) {
          iterator.remove();
          sendRemoveRunRequest(owner);
        }
      }

      synchronized (_awaitingReturn) {
        for (Entry<Owner, Runnable> entry : _awaitingReturn.entrySet()) {
          // The node currently servicing our request has gone down
          // without completing, so we have to keep our request alive by
          // sending it back to the coordinator
          Owner owner = entry.getKey();
          if (!members.contains(owner.getAddress())) {
            Runnable runnable = entry.getValue();
            // We need to register the request id before sending the request back to the coordinator
            // in case our task gets picked up because another node was removed
            _requestId.put(runnable, owner.getRequestId());
            _awaitingConsumer.add(runnable);
            sendToCoordinator(RUN_REQUEST, owner.getRequestId(), local_addr);
          }
        }
      }
    } finally {
      _consumerLock.unlock();
    }
  }
  /**
   * Notifies this {@code SimulcastReceiver} that a specific {@code SimulcastLayer} has detected
   * the start of a new video frame in the RTP stream that it represents. Determines whether any of
   * {@link #simulcastLayers} other than {@code source} have been paused/stopped by the remote peer.
   * The determination is based on counting (video) frames.
   *
   * @param source the {@code SimulcastLayer} which is the source of the event i.e. which has
   *     detected the start of a new video frame in the RTP stream that it represents
   * @param pkt the {@code RawPacket} which was received by {@code source} and possibly influenced
   *     the decision that a new video frame was started in the RTP stream represented by {@code
   *     source}
   * @param layers the set of {@code SimulcastLayer}s managed by this {@code SimulcastReceiver}.
   *     Explicitly provided to the method in order to avoid invocations of {@link
   *     #getSimulcastLayers()} because the latter makes a copy at the time of this writing.
   */
  private void simulcastLayerFrameStarted(
      SimulcastLayer source, RawPacket pkt, SimulcastLayer[] layers) {
    // Allow the value of the constant TIMEOUT_ON_FRAME_COUNT to disable (at
    // compile time) the frame-based approach to the detection of layer
    // drops.
    if (TIMEOUT_ON_FRAME_COUNT <= 1) return;

    // Timeouts in layers caused by source may occur only based on the span
    // (of time or received frames) during which source has received
    // TIMEOUT_ON_FRAME_COUNT number of frames. The current method
    // invocation signals the receipt of 1 frame by source.
    int indexOfLastSourceOccurrenceInHistory = -1;
    int sourceFrameCount = 0;
    int ix = 0;

    for (Iterator<SimulcastLayer> it = simulcastLayerFrameHistory.iterator(); it.hasNext(); ++ix) {
      if (it.next() == source) {
        if (indexOfLastSourceOccurrenceInHistory != -1) {
          // Prune simulcastLayerFrameHistory so that it does not
          // become unnecessarily long.
          it.remove();
        } else if (++sourceFrameCount >= TIMEOUT_ON_FRAME_COUNT - 1) {
          // The span of TIMEOUT_ON_FRAME_COUNT number of frames
          // received by source only is to be examined for the
          // purposes of timeouts. The current method invocations
          // signals the receipt of 1 frame by source so
          // TIMEOUT_ON_FRAME_COUNT - 1 occurrences of source in
          // simulcastLayerFrameHistory is enough.
          indexOfLastSourceOccurrenceInHistory = ix;
        }
      }
    }

    if (indexOfLastSourceOccurrenceInHistory != -1) {
      // Presumably, if a SimulcastLayer is active, all SimulcastLayers
      // before it (according to SimulcastLayer's order) are active as
      // well. Consequently, timeouts may occur in SimulcastLayers which
      // are after source.
      boolean maybeTimeout = false;

      for (SimulcastLayer layer : layers) {
        if (maybeTimeout) {
          // There's no point in timing layer out if it's timed out
          // already.
          if (layer.isStreaming()) {
            maybeTimeout(source, pkt, layer, indexOfLastSourceOccurrenceInHistory);
          }
        } else if (layer == source) {
          maybeTimeout = true;
        }
      }
    }

    // As previously stated, the current method invocation signals the
    // receipt of 1 frame by source.
    simulcastLayerFrameHistory.add(0, source);
    // TODO Prune simulcastLayerFrameHistory by forgetting so that it does
    // not become too long.
  }
 /** iterator.remove removes element */
 public void testIteratorRemove() {
   List full = populatedArray(SIZE);
   Iterator it = full.iterator();
   Object first = full.get(0);
   it.next();
   it.remove();
   assertFalse(full.contains(first));
 }
Example #14
 /** Stops the executors and cleans the memory of every registered CUObject. */
 public static void stop() {
   synchronized (cudaEngines) {
     cuCtxSynchronizeAll();
     for (Iterator<CudaEngine> iterator = cudaEngines.values().iterator(); iterator.hasNext(); ) {
       iterator.next().shutdown();
       iterator.remove();
     }
   }
 }
Example #15
  public void deregisterAllVerbHandlers(EndPoint localEndPoint) {
    Iterator keys = verbHandlers_.keySet().iterator();
    String key = null;

    /*
     * Endpoint-specific verb handlers can be distinguished because
     * their keys contain the name of the endpoint.
     */
    while (keys.hasNext()) {
      key = (String) keys.next();
      if (key.contains(localEndPoint.toString())) keys.remove();
    }
  }
 /** Return all of the neighbors with whom we share data. */
 static Set<InetAddress> getNeighbors(String table, Range range) {
   StorageService ss = StorageService.instance;
   Map<Range, List<InetAddress>> replicaSets = ss.getRangeToAddressMap(table);
   if (!replicaSets.containsKey(range)) return Collections.emptySet();
   Set<InetAddress> neighbors = new HashSet<InetAddress>(replicaSets.get(range));
   neighbors.remove(FBUtilities.getLocalAddress());
   // Excluding all nodes with version <= 0.7 since they don't know how to
   // create a correct merkle tree (they build it over the full range)
   Iterator<InetAddress> iter = neighbors.iterator();
   while (iter.hasNext()) {
     InetAddress endpoint = iter.next();
     if (Gossiper.instance.getVersion(endpoint) <= MessagingService.VERSION_07) {
       logger.info(
           "Excluding "
               + endpoint
               + " from repair because it is on version 0.7 or sooner. You should consider updating this node before running repair again.");
       iter.remove();
     }
   }
   return neighbors;
 }
Example #17
  void autoUpdate() {
    if (hasTitle()) {
      return;
    }

    for (Shape s : allShapes) {
      s.autoUpdate();
    }

    Iterator<Shape> iter = allShapes.iterator();
    while (iter.hasNext()) {
      Shape s = iter.next();
      s.update();
      if (s.isDestroyed()) {
        if (s.isSolid()) {
          removeSolid(s);
        }
        removeFromLayers(s);
        iter.remove();
      }
    }
  }
 @Test
 public void iterateOverMapKeys() {
   HazelcastClient hClient = getHazelcastClient();
   Map<String, String> map = hClient.getMap("iterateOverMapKeys");
   map.put("1", "A");
   map.put("2", "B");
   map.put("3", "C");
   Set<String> keySet = map.keySet();
   assertEquals(3, keySet.size());
   Set<String> s = new HashSet<String>();
   for (String string : keySet) {
     s.add(string);
     assertTrue(Arrays.asList("1", "2", "3").contains(string));
   }
   assertEquals(3, s.size());
   Iterator<String> iterator = keySet.iterator();
   while (iterator.hasNext()) {
     iterator.next();
     iterator.remove();
   }
   assertEquals(0, map.size());
 }
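The same write-through applies to keySet() on any java.util.Map: the set is a live view, so draining it through its iterator empties the map itself. A minimal sketch with a plain HashMap (class name hypothetical):

import java.util.*;

public class KeySetDrainSketch {
  public static void main(String[] args) {
    Map<String, String> map = new HashMap<>();
    map.put("1", "A");
    map.put("2", "B");
    map.put("3", "C");

    Iterator<String> it = map.keySet().iterator();
    while (it.hasNext()) {
      it.next();
      it.remove(); // removes the whole entry from the backing map
    }

    System.out.println(map.size()); // 0
  }
}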
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (node2part != null && node2part.compareTo(partMap) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale partition map for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ", curMap="
                  + node2part
                  + ", newMap="
                  + partMap
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      if (exchId != null) lastExchangeId = exchId;

      if (node2part != null) {
        for (GridDhtPartitionMap part : node2part.values()) {
          GridDhtPartitionMap newPart = partMap.get(part.nodeId());

          // If for some nodes current partition has a newer map,
          // then we keep the newer value.
          if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
            if (log.isDebugEnabled())
              log.debug(
                  "Overriding partition map in full update map [exchId="
                      + exchId
                      + ", curPart="
                      + mapString(part)
                      + ", newPart="
                      + mapString(newPart)
                      + ']');

            partMap.put(part.nodeId(), part);
          }
        }

        for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
          UUID nodeId = it.next();

          if (!cctx.discovery().alive(nodeId)) {
            if (log.isDebugEnabled())
              log.debug(
                  "Removing left node from full map update [nodeId="
                      + nodeId
                      + ", partMap="
                      + partMap
                      + ']');

            it.remove();
          }
        }
      }

      node2part = partMap;

      Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

      for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> ids = p2n.get(p);

          if (ids == null)
            // Initialize HashSet to size 3 in anticipation that there won't be
            // more than 3 nodes per partition.
            p2n.put(p, ids = U.newHashSet(3));

          ids.add(e.getKey());
        }
      }

      part2node = p2n;

      boolean changed = checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
Example #20
  @Override
  public void onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
    if (trace)
      log.tracef(
          "Received new CH %s for cache %s", cacheTopology.getWriteConsistentHash(), cacheName);

    int numStartedTopologyUpdates = activeTopologyUpdates.incrementAndGet();
    if (isRebalance) {
      rebalanceInProgress.set(true);
    }
    final ConsistentHash previousCh =
        this.cacheTopology != null ? this.cacheTopology.getWriteConsistentHash() : null;
    // Ensures writes to the data container use the right consistent hash
    // No need for a try/finally block, since it's just an assignment
    stateTransferLock.acquireExclusiveTopologyLock();
    this.cacheTopology = cacheTopology;
    if (numStartedTopologyUpdates == 1) {
      updatedKeys = new ConcurrentHashSet<Object>();
    }
    stateTransferLock.releaseExclusiveTopologyLock();
    stateTransferLock.notifyTopologyInstalled(cacheTopology.getTopologyId());

    try {
      // fetch transactions and data segments from other owners if this is enabled
      if (isTransactional || isFetchEnabled) {
        Set<Integer> addedSegments;
        if (previousCh == null) {
          // we start fresh, without any data, so we need to pull everything we own according to
          // writeCh

          addedSegments = getOwnedSegments(cacheTopology.getWriteConsistentHash());

          if (trace) {
            log.tracef("On cache %s we have: added segments: %s", cacheName, addedSegments);
          }
        } else {
          Set<Integer> previousSegments = getOwnedSegments(previousCh);
          Set<Integer> newSegments = getOwnedSegments(cacheTopology.getWriteConsistentHash());

          Set<Integer> removedSegments = new HashSet<Integer>(previousSegments);
          removedSegments.removeAll(newSegments);

          // This is a rebalance, we need to request the segments we own in the new CH.
          addedSegments = new HashSet<Integer>(newSegments);
          addedSegments.removeAll(previousSegments);

          if (trace) {
            log.tracef(
                "On cache %s we have: removed segments: %s; new segments: %s; old segments: %s; added segments: %s",
                cacheName, removedSegments, newSegments, previousSegments, addedSegments);
          }

          // remove inbound transfers and any data for segments we no longer own
          cancelTransfers(removedSegments);

          // If L1.onRehash is enabled, "removed" segments are actually moved to L1. The new (and
          // old) owners
          // will automatically add the nodes that no longer own a key to that key's requestors
          // list.
          invalidateSegments(newSegments, removedSegments);

          // check if any of the existing transfers should be restarted from a different source
          // because the initial source is no longer a member
          Set<Address> members =
              new HashSet<Address>(cacheTopology.getReadConsistentHash().getMembers());
          synchronized (this) {
            for (Iterator<Address> it = transfersBySource.keySet().iterator(); it.hasNext(); ) {
              Address source = it.next();
              if (!members.contains(source)) {
                if (trace) {
                  log.tracef(
                      "Removing inbound transfers from source %s for cache %s", source, cacheName);
                }
                List<InboundTransferTask> inboundTransfers = transfersBySource.get(source);
                it.remove();
                for (InboundTransferTask inboundTransfer : inboundTransfers) {
                  // these segments will be restarted if they are still in new write CH
                  if (trace) {
                    log.tracef(
                        "Removing inbound transfers for segments %s from source %s for cache %s",
                        inboundTransfer.getSegments(), source, cacheName);
                  }
                  transfersBySegment.keySet().removeAll(inboundTransfer.getSegments());
                  addedSegments.addAll(inboundTransfer.getUnfinishedSegments());
                }
              }
            }

            // exclude those that are already in progress from a valid source
            addedSegments.removeAll(transfersBySegment.keySet());
          }
        }

        if (!addedSegments.isEmpty()) {
          addTransfers(addedSegments); // add transfers for new or restarted segments
        }
      }
    } finally {
      stateTransferLock.notifyTransactionDataReceived(cacheTopology.getTopologyId());

      if (activeTopologyUpdates.decrementAndGet() == 0) {
        notifyEndOfTopologyUpdate(cacheTopology.getTopologyId());
      }
    }
  }