/**
   * Synchronously waits for all renting partitions to finish eviction.
   *
   * @return {@code true} if the local partition mapping changed.
   * @throws IgniteCheckedException If failed.
   */
  private boolean waitForRent() throws IgniteCheckedException {
    boolean changed = false;

    // Synchronously wait for all renting partitions to complete.
    for (Iterator<GridDhtLocalPartition> it = locParts.values().iterator(); it.hasNext(); ) {
      GridDhtLocalPartition p = it.next();

      GridDhtPartitionState state = p.state();

      if (state == RENTING || state == EVICTED) {
        if (log.isDebugEnabled()) log.debug("Waiting for renting partition: " + p);

        // Wait for partition to empty out.
        p.rent(true).get();

        if (log.isDebugEnabled()) log.debug("Finished waiting for renting partition: " + p);

        // Remove evicted partition.
        it.remove();

        changed = true;
      }
    }

    return changed;
  }
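  // Hedged sketch: the wait-and-prune idiom above (block on each pending
  // eviction, then drop the map entry) is shown below in a self-contained form,
  // using java.util.concurrent.CompletableFuture in place of Ignite's internal
  // rent futures. All names here are hypothetical.
  private static boolean waitForPendingEvictions(
      Map<Integer, CompletableFuture<Void>> pendingEvictions) throws Exception {
    boolean changed = false;

    for (Iterator<Map.Entry<Integer, CompletableFuture<Void>>> it =
            pendingEvictions.entrySet().iterator();
        it.hasNext(); ) {
      // Block until this partition's eviction completes.
      it.next().getValue().get();

      // Remove the completed entry, mirroring it.remove() above.
      it.remove();

      changed = true;
    }

    return changed;
  }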
  /** {@inheritDoc} */
  @Override
  public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr)
      throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    if (writes != null) unmarshalTx(writes, false, ctx, ldr);

    if (reads != null) unmarshalTx(reads, false, ctx, ldr);

    if (grpLockKeyBytes != null && grpLockKey == null)
      grpLockKey = ctx.marshaller().unmarshal(grpLockKeyBytes, ldr);

    if (dhtVerKeys != null && dhtVers == null) {
      assert dhtVerVals != null;
      assert dhtVerKeys.size() == dhtVerVals.size();

      Iterator<IgniteTxKey> keyIt = dhtVerKeys.iterator();
      Iterator<GridCacheVersion> verIt = dhtVerVals.iterator();

      dhtVers = U.newHashMap(dhtVerKeys.size());

      while (keyIt.hasNext()) {
        IgniteTxKey key = keyIt.next();

        key.finishUnmarshal(ctx.cacheContext(key.cacheId()), ldr);

        dhtVers.put(key, verIt.next());
      }
    }

    if (txNodesBytes != null) txNodes = ctx.marshaller().unmarshal(txNodesBytes, ldr);
  }
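  // Hedged sketch: the dhtVerKeys/dhtVerVals block above zips two parallel
  // collections, marshalled separately, back into a map. A self-contained,
  // hypothetical version of that idiom:
  private static <K, V> Map<K, V> zipToMap(List<K> keys, List<V> vals) {
    assert keys.size() == vals.size();

    Map<K, V> map = new HashMap<>(keys.size());

    Iterator<K> keyIt = keys.iterator();
    Iterator<V> valIt = vals.iterator();

    // Pairwise iteration: the i-th key maps to the i-th value.
    while (keyIt.hasNext()) map.put(keyIt.next(), valIt.next());

    return map;
  }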
  /** {@inheritDoc} */
  @Override
  public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr)
      throws IgniteCheckedException {
    super.finishUnmarshal(ctx, ldr);

    if (ownedValKeys != null && ownedVals == null) {
      ownedVals = U.newHashMap(ownedValKeys.size());

      assert ownedValKeys.size() == ownedValVals.size();

      Iterator<IgniteTxKey> keyIter = ownedValKeys.iterator();

      Iterator<CacheVersionedValue> valIter = ownedValVals.iterator();

      while (keyIter.hasNext()) {
        IgniteTxKey key = keyIter.next();

        GridCacheContext cctx = ctx.cacheContext(key.cacheId());

        CacheVersionedValue val = valIter.next();

        key.finishUnmarshal(cctx, ldr);

        val.finishUnmarshal(cctx, ldr);

        ownedVals.put(key, val);
      }
    }

    if (retVal != null && retVal.cacheId() != 0) {
      GridCacheContext cctx = ctx.cacheContext(retVal.cacheId());

      assert cctx != null : retVal.cacheId();

      retVal.finishUnmarshal(cctx, ldr);
    }

    if (filterFailedKeys != null) {
      for (IgniteTxKey key : filterFailedKeys) {
        GridCacheContext cctx = ctx.cacheContext(key.cacheId());

        key.finishUnmarshal(cctx, ldr);
      }
    }
  }
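  // Hedged sketch: both finishUnmarshal overloads above follow a two-phase
  // pattern where raw bytes travel over the wire and the object form is
  // materialized once, guarded by "bytes != null && object == null". Assuming an
  // Ignite-style Marshaller with unmarshal(byte[], ClassLoader), a minimal,
  // hypothetical version looks like this:
  private byte[] payloadBytes;
  private Object payload;

  void finishUnmarshalPayload(Marshaller marshaller, ClassLoader ldr)
      throws IgniteCheckedException {
    // Unmarshal lazily and only once: skip if already done or nothing was sent.
    if (payloadBytes != null && payload == null)
      payload = marshaller.unmarshal(payloadBytes, ldr);
  }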
  /**
   * Tests the queue iterator, including {@link Iterator#remove()} semantics.
   *
   * @throws Exception If failed.
   */
  public void testIterator() throws Exception {
    // Random queue name.
    String queueName = UUID.randomUUID().toString();

    IgniteQueue<String> queue = grid(0).queue(queueName, 0, config(false));

    for (int i = 0; i < 100; i++) assert queue.add(Integer.toString(i));

    Iterator<String> iter1 = queue.iterator();

    int cnt = 0;
    for (int i = 0; i < 100; i++) {
      assertNotNull(iter1.next());
      cnt++;
    }

    assertEquals(100, queue.size());
    assertEquals(100, cnt);

    assertNotNull(queue.take());
    assertNotNull(queue.take());
    assertTrue(queue.remove("33"));
    assertTrue(queue.remove("77"));

    assertEquals(96, queue.size());

    Iterator<String> iter2 = queue.iterator();

    try {
      iter2.remove();

      fail("Expected IllegalStateException: remove() called before next().");
    } catch (IllegalStateException e) {
      info("Caught expected exception: " + e);
    }

    iter2.next();
    iter2.remove();

    cnt = 0;
    while (iter2.hasNext()) {
      assertNotNull(iter2.next());
      cnt++;
    }

    assertEquals(95, cnt);
    assertEquals(95, queue.size());

    iter2.remove();
  }
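  /**
   * Hedged companion check: the remove-before-next contract exercised above is
   * the standard {@link java.util.Iterator} contract, shown here against a plain
   * JDK LinkedList for comparison. Assumes the same base-class helpers (fail,
   * assertEquals, info) used in the test above.
   */
  public void testJdkIteratorRemoveContract() {
    Queue<String> q = new LinkedList<>(Arrays.asList("a", "b", "c"));

    Iterator<String> it = q.iterator();

    try {
      it.remove();

      fail("Expected IllegalStateException: remove() called before next().");
    } catch (IllegalStateException e) {
      info("Caught expected exception: " + e);
    }

    // After next(), remove() deletes the element just returned.
    it.next();
    it.remove();

    assertEquals(2, q.size());
  }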
  /** Clears values for this partition. */
  private void clearAll() {
    GridCacheVersion clearVer = cctx.versions().next();

    boolean swap = cctx.isSwapOrOffheapEnabled();

    boolean rec = cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);

    Iterator<GridDhtCacheEntry> it = map.values().iterator();

    GridCloseableIterator<Map.Entry<byte[], GridCacheSwapEntry>> swapIt = null;

    if (swap
        && GridQueryProcessor.isEnabled(cctx.config())) { // Indexing needs to unswap cache values.
      Iterator<GridDhtCacheEntry> unswapIt = null;

      try {
        swapIt = cctx.swap().iterator(id);
        unswapIt = unswapIterator(swapIt);
      } catch (Exception e) {
        U.error(log, "Failed to clear swap for evicted partition: " + this, e);
      }

      if (unswapIt != null) it = F.concat(it, unswapIt);
    }

    try {
      while (it.hasNext()) {
        GridDhtCacheEntry cached = it.next();

        try {
          if (cached.clearInternal(clearVer, swap)) {
            map.remove(cached.key(), cached);

            if (!cached.isInternal()) {
              mapPubSize.decrement();

              if (rec)
                cctx.events()
                    .addEvent(
                        cached.partition(),
                        cached.key(),
                        cctx.localNodeId(),
                        (IgniteUuid) null,
                        null,
                        EVT_CACHE_REBALANCE_OBJECT_UNLOADED,
                        null,
                        false,
                        cached.rawGet(),
                        cached.hasValue(),
                        null,
                        null,
                        null);
            }
          }
        } catch (IgniteCheckedException e) {
          U.error(log, "Failed to clear cache entry for evicted partition: " + cached, e);
        }
      }
    } finally {
      U.close(swapIt, log);
    }
  }
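  // Hedged sketch: clearAll() relies on the two-argument ConcurrentMap.remove
  // (map.remove(cached.key(), cached)), which removes the mapping only if it
  // still points at the same entry instance, and isolates per-entry failures so
  // one bad entry cannot abort the sweep. A self-contained, hypothetical version:
  private static <K, V> int sweep(ConcurrentMap<K, V> map, Predicate<V> clearable) {
    int removed = 0;

    for (Map.Entry<K, V> e : map.entrySet()) {
      try {
        // Remove only if the mapping still points at the same value instance,
        // so a concurrently replaced entry for the same key is never clobbered.
        if (clearable.test(e.getValue()) && map.remove(e.getKey(), e.getValue()))
          removed++;
      } catch (RuntimeException ex) {
        // Log-and-continue, mirroring the per-entry catch above.
        System.err.println("Failed to clear entry: " + ex);
      }
    }

    return removed;
  }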
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionFullMap partMap) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating full partition map [exchId=" + exchId + ", parts=" + fullMapString() + ']');

    assert partMap != null;

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (exchId != null && lastExchangeId != null && lastExchangeId.compareTo(exchId) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (node2part != null && node2part.compareTo(partMap) >= 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale partition map for full partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ", curMap="
                  + node2part
                  + ", newMap="
                  + partMap
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      if (exchId != null) lastExchangeId = exchId;

      if (node2part != null) {
        for (GridDhtPartitionMap part : node2part.values()) {
          GridDhtPartitionMap newPart = partMap.get(part.nodeId());

          // If the current map for a node is newer than the one in the incoming
          // full map, keep the newer value.
          if (newPart != null && newPart.updateSequence() < part.updateSequence()) {
            if (log.isDebugEnabled())
              log.debug(
                  "Overriding partition map in full update map [exchId="
                      + exchId
                      + ", curPart="
                      + mapString(part)
                      + ", newPart="
                      + mapString(newPart)
                      + ']');

            partMap.put(part.nodeId(), part);
          }
        }

        for (Iterator<UUID> it = partMap.keySet().iterator(); it.hasNext(); ) {
          UUID nodeId = it.next();

          if (!cctx.discovery().alive(nodeId)) {
            if (log.isDebugEnabled())
              log.debug(
                  "Removing left node from full map update [nodeId="
                      + nodeId
                      + ", partMap="
                      + partMap
                      + ']');

            it.remove();
          }
        }
      }

      node2part = partMap;

      Map<Integer, Set<UUID>> p2n = new HashMap<>(cctx.affinity().partitions(), 1.0f);

      for (Map.Entry<UUID, GridDhtPartitionMap> e : partMap.entrySet()) {
        for (Integer p : e.getValue().keySet()) {
          Set<UUID> ids = p2n.get(p);

          if (ids == null)
            // Initialize HashSet to size 3 in anticipation that there won't be
            // more than 3 nodes per partition.
            p2n.put(p, ids = U.newHashSet(3));

          ids.add(e.getKey());
        }
      }

      part2node = p2n;

      boolean changed = checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after full update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
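  // Hedged sketch: update() ends by inverting the node -> partitions map into a
  // partition -> nodes index (part2node). A self-contained, hypothetical version
  // of that inversion:
  private static Map<Integer, Set<UUID>> invert(Map<UUID, Set<Integer>> node2part) {
    Map<Integer, Set<UUID>> part2node = new HashMap<>();

    for (Map.Entry<UUID, Set<Integer>> e : node2part.entrySet()) {
      for (Integer p : e.getValue())
        // Small initial capacity: few owner nodes per partition is typical.
        part2node.computeIfAbsent(p, k -> new HashSet<>(3)).add(e.getKey());
    }

    return part2node;
  }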