  /** Tests that an iterator keeps working when nodes are unlinked during iteration. */
  public void testUnlinkWithIterator() {
    ConcurrentLinkedDeque8<Integer> q = new ConcurrentLinkedDeque8<>();

    q.add(1);
    Node<Integer> n2 = q.addx(2);
    Node<Integer> n3 = q.addx(3);
    Node<Integer> n4 = q.addx(4);
    Node<Integer> n5 = q.addx(5);
    q.add(6);

    Iterator<Integer> it = q.iterator();

    assertTrue(it.hasNext());
    assertEquals(1, it.next().intValue());

    assertTrue(it.hasNext());
    assertEquals(2, it.next().intValue());

    assertTrue(it.hasNext());
    assertEquals(3, it.next().intValue());

    q.unlinkx(n2);
    q.unlinkx(n3);
    q.unlinkx(n4);
    q.unlinkx(n5);
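    // The iterator should have already fetched node 4, so it is still returned
    // below even though it was unlinked; 5 was not fetched yet and is skipped.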

    assertTrue(it.hasNext());
    assertEquals(4, it.next().intValue());

    assertTrue(it.hasNext());
    assertEquals(6, it.next().intValue());
  }
  /**
   * Synchronously waits for all renting partitions to be evicted.
   *
   * @return {@code True} if mapping was changed.
   * @throws IgniteCheckedException If failed.
   */
  private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
      GridDhtLocalPartition p = it.next();

      GridDhtPartitionState state = p.state();

      if (state == RENTING || state == EVICTED) {
        if (log.isDebugEnabled()) log.debug("Waiting for renting partition: " + p);

        // Wait for partition to empty out.
        p.rent(true).get();

        if (log.isDebugEnabled()) log.debug("Finished waiting for renting partition: " + p);

        // Remove evicted partition.
        it.remove();

        changed = true;
      }
    }

    return changed;
  }
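  /** Tests that unlinking the last node does not break an active iterator. */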
  public void testUnlinkLastWithIterator() {
    ConcurrentLinkedDeque8<Integer> q = new ConcurrentLinkedDeque8<>();

    q.add(1);
    q.addx(2);
    Node<Integer> n3 = q.addx(3);

    Iterator<Integer> it = q.iterator();

    assertTrue(it.hasNext());
    assertEquals(1, it.next().intValue());

    q.unlinkx(n3);

    assertTrue(it.hasNext());
    assertEquals(2, it.next().intValue());

    assertFalse(it.hasNext());
  }
  /**
   * Checks deque size both by iterating over it and via its size methods.
   *
   * @param q Deque.
   * @param expSize Expected size.
   */
  @SuppressWarnings({"ForLoopReplaceableByForEach"})
  private <T> void checkSize(ConcurrentLinkedDeque8<T> q, int expSize) {
    int actualSize = 0;

    for (Iterator<T> iter = q.iterator(); iter.hasNext(); ) {
      iter.next();

      actualSize++;
    }

    assertEquals(expSize, actualSize);

    actualSize = 0;

    // Iterate once more with a fresh iterator to make sure the result is stable.
    for (Iterator<T> iter = q.iterator(); iter.hasNext(); ) {
      iter.next();

      actualSize++;
    }

    assertEquals(expSize, actualSize);

    assertEquals(expSize, q.size());

    assertEquals(expSize, q.sizex());

    if (expSize > 0) {
      assert !q.isEmpty();

      assert !q.isEmptyx();
    } else {
      assert q.isEmpty();

      assert q.isEmptyx();
    }
  }
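  /** Tests unlinkx() combined with iterator removal, including repeated unlink calls. */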
  public void testUnlink() {
    ConcurrentLinkedDeque8<Integer> deque = new ConcurrentLinkedDeque8<>();

    Node<Integer> n1 = deque.addx(1);
    Node<Integer> n2 = deque.addx(2);
    Node<Integer> n3 = deque.addx(3);
    Node<Integer> n4 = deque.addx(4);
    Node<Integer> n5 = deque.addx(5);

    deque.unlinkx(n2);

    checkSize(deque, 4);

    // Second unlinkx() call on the same node must be a no-op.
    deque.unlinkx(n2);

    checkSize(deque, 4);

    Iterator<Integer> iter = deque.iterator();

    boolean hasNext = iter.hasNext();

    assert hasNext;

    Integer next = iter.next();

    assert next == 1;

    iter.remove();

    // Iterator should have set item to null.
    assert n1.item() == null;

    checkSize(deque, 3);

    // Unlinking a node already removed via the iterator must be a no-op.
    deque.unlinkx(n1);

    checkSize(deque, 3);

    deque.unlinkx(n3);
    deque.unlinkx(n4);
    deque.unlinkx(n5);

    checkSize(deque, 0);
  }
  /** Clears values for this partition. */
  private void clearAll() {
    GridCacheVersion clearVer = cctx.versions().next();

    boolean swap = cctx.isSwapOrOffheapEnabled();

    boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);

    Iterator<GridDhtCacheEntry> it = map.values().iterator();

    GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null;

    if (swap
        && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values.
      Iterator<GridDhtCacheEntry> unswapIt = null;

      try {
        swapIt = cctx.swap().iterator(id);
        unswapIt = unswapIterator(swapIt);
      } catch (Exception e) {
        U.error(log, "Failed to clear swap for evicted partition: " + this, e);
      }

      // Clear both on-heap entries and unswapped entries in a single pass.
      if (unswapIt != null) it = F.concat(it, unswapIt);
    }

    try {
      while (it.hasNext()) {
        GridDhtCacheEntry cached = it.next();

        try {
          if (cached.clearInternal(clearVer, swap)) {
            map.remove(cached.key(), cached);

            if (!cached.isInternal()) {
              mapPubSize.decrement();

              if (rec)
                cctx.events()
                    .addEvent(
                        cached.partition(),
                        cached.key(),
                        cctx.localNodeId(),
                        (IgniteUuid) null,
                        null,
                        EVT_CACHE_REBALANCE_OBJECT_UNLOADED,
                        null,
                        false,
                        cached.rawGet(),
                        cached.hasValue(),
                        null,
                        null,
                        null);
            }
          }
        } catch (IgniteCheckedException e) {
          U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e);
        }
      }
    } finally {
      U.close(swapIt, log);
    }
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (node2part != null && node2part.compareTo(partMap) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale partition map for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ", curMap="
                  + node2part
                  + ", newMap="
                  + partMap
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      if (exchId != null) lastExchangeId = exchId;

      if (node2part != null) {
        for (GridDhtPartitionMap part : node2part.values()) {
          GridDhtPartitionMap newPart = partMap.get(part.nodeId());

          // If the map we already have for some node is newer than the one in
          // the incoming full map, keep the newer (current) value.
          if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
            if (log.isDebugEnabled())
              log.debug(
                  "Overriding partition map in full update map [exchId="
                      + exchId
                      + ", curPart="
                      + mapString(part)
                      + ", newPart="
                      + mapString(newPart)
                      + ']');

            partMap.put(part.nodeId(), part);
          }
        }

        for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
          UUID nodeId = it.next();

          if (!cctx.discovery().alive(nodeId)) {
            if (log.isDebugEnabled())
              log.debug(
                  "Removing left node from full map update [nodeId="
                      + nodeId
                      + ", partMap="
                      + partMap
                      + ']');

            it.remove();
          }
        }
      }

      node2part = partMap;

      Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

      // Rebuild partition-to-node mapping from the new full map.
      for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> ids = p2n.get(p);

          if (ids == null)
            // Initialize HashSet to size 3 in anticipation that there won't be
            // more than 3 nodes per partition.
            p2n.put(p, ids = U.newHashSet(3));

          ids.add(e.getKey());
        }
      }

      part2node = p2n;

      boolean changed = checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }