Example #1
  /**
   * Completeness callback.
   *
   * @param success {@code True} if lock was acquired.
   * @param distribute {@code True} if need to distribute lock removal in case of failure.
   * @return {@code True} if complete by this operation.
   */
  private boolean onComplete(boolean success, boolean distribute) {
    if (log.isDebugEnabled())
      log.debug(
          "Received onComplete(..) callback [success="
              + success
              + ", distribute="
              + distribute
              + ", fut="
              + this
              + ']');

    if (!success) undoLocks(distribute);

    if (tx != null) cctx.tm().txContext(tx);

    if (super.onDone(success, err.get())) {
      if (log.isDebugEnabled()) log.debug("Completing future: " + this);

      // Clean up.
      cctx.mvcc().removeFuture(this);

      if (timeoutObj != null) cctx.time().removeTimeoutObject(timeoutObj);

      return true;
    }

    return false;
  }
Example #2
  /**
   * @param nodeId Sender.
   * @param res Result.
   */
  void onResult(UUID nodeId, GridNearLockResponse<K, V> res) {
    if (!isDone()) {
      if (log.isDebugEnabled())
        log.debug(
            "Received lock response from node [nodeId="
                + nodeId
                + ", res="
                + res
                + ", fut="
                + this
                + ']');

      for (GridFuture<Boolean> fut : pending()) {
        if (isMini(fut)) {
          MiniFuture mini = (MiniFuture) fut;

          if (mini.futureId().equals(res.miniId())) {
            assert mini.node().id().equals(nodeId);

            if (log.isDebugEnabled())
              log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');

            mini.onResult(res);

            if (log.isDebugEnabled())
              log.debug(
                  "Future after processed lock response [fut="
                      + this
                      + ", mini="
                      + mini
                      + ", res="
                      + res
                      + ']');

            return;
          }
        }
      }

      U.warn(
          log,
          "Failed to find mini future for response (perhaps due to stale message) [res="
              + res
              + ", fut="
              + this
              + ']');
    } else if (log.isDebugEnabled())
      log.debug(
          "Ignoring lock response from node (future is done) [nodeId="
              + nodeId
              + ", res="
              + res
              + ", fut="
              + this
              + ']');
  }
Example #3
  /**
   * @param cancel {@code True} to close with cancellation.
   * @throws GridException If failed.
   */
  @Override
  public void close(boolean cancel) throws GridException {
    if (!closed.compareAndSet(false, true)) return;

    busyLock.block();

    if (log.isDebugEnabled())
      log.debug("Closing data loader [ldr=" + this + ", cancel=" + cancel + ']');

    GridException e = null;

    try {
      // Assuming that no methods are called on this loader after this method is called.
      if (cancel) {
        cancelled = true;

        for (Buffer buf : bufMappings.values()) buf.cancelAll();
      } else doFlush();

      ctx.event().removeLocalEventListener(discoLsnr);

      ctx.io().removeMessageListener(topic);
    } catch (GridException e0) {
      e = e0;
    }

    fut.onDone(null, e);

    if (e != null) throw e;
  }
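A note on the guard at the top of close(..): the java.util.concurrent.atomic.AtomicBoolean CAS is what makes the method idempotent under concurrent callers. A minimal standalone sketch of just that guard (names are illustrative, not from the source):

  private final AtomicBoolean closed = new AtomicBoolean();

  public void close() {
    // The CAS succeeds for exactly one caller; every other concurrent or
    // repeated close() returns immediately without re-running shutdown work.
    if (!closed.compareAndSet(false, true))
      return;

    // ... shutdown work below this point runs at most once ...
  }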
Example #4
  /** @return {@code True} if locks have been acquired. */
  private boolean checkLocks() {
    if (!isDone() && initialized() && !hasPending()) {
      for (int i = 0; i < entries.size(); i++) {
        while (true) {
          GridCacheEntryEx<K, V> cached = entries.get(i);

          try {
            if (!locked(cached)) {
              if (log.isDebugEnabled())
                log.debug(
                    "Lock is still not acquired for entry (will keep waiting) [entry="
                        + cached
                        + ", fut="
                        + this
                        + ']');

              return false;
            }

            break;
          }
          // Possible in concurrent cases, when owner is changed after locks
          // have been released or cancelled.
          catch (GridCacheEntryRemovedException ignore) {
            if (log.isDebugEnabled())
              log.debug("Got removed entry in onOwnerChanged method (will retry): " + cached);

            // Replace old entry with new one.
            entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(cached.key()));
          }
        }
      }

      if (log.isDebugEnabled())
        log.debug("Local lock acquired for entries [fut=" + this + ", entries=" + entries + "]");

      onComplete(true, true);

      return true;
    }

    return false;
  }
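The inner while (true) above is this codebase's recurring answer to GridCacheEntryRemovedException: the cached handle went stale, so a fresh entry for the same key is swapped in and the check is retried. A condensed sketch of just that retry pattern, assuming the surrounding class's entries, cctx and locked(..) members:

  private boolean lockedWithRetry(int i) {
    while (true) {
      GridCacheEntryEx<K, V> cached = entries.get(i);

      try {
        return locked(cached); // Throws if the entry was concurrently removed.
      } catch (GridCacheEntryRemovedException ignore) {
        // Stale handle: replace it with a fresh entry for the same key and retry.
        entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(cached.key()));
      }
    }
  }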
Example #5
  /**
   * Undoes all locks.
   *
   * @param dist If {@code true}, then remove locks from remote nodes as well.
   */
  private void undoLocks(boolean dist) {
    // Transactions will undo their locks during rollback.
    if (dist && tx == null) cctx.nearTx().removeLocks(lockVer, keys);
    else {
      if (tx != null) {
        if (tx.setRollbackOnly()) {
          if (log.isDebugEnabled())
            log.debug(
                "Marked transaction as rollback only because locks could not be acquired: " + tx);
        } else if (log.isDebugEnabled())
          log.debug(
              "Transaction was not marked rollback-only while locks were not acquired: " + tx);
      }

      for (GridCacheEntryEx<K, V> e : entriesCopy()) {
        try {
          e.removeLock(lockVer);
        } catch (GridCacheEntryRemovedException ignored) {
          while (true) {
            try {
              e = cctx.cache().peekEx(e.key());

              if (e != null) e.removeLock(lockVer);

              break;
            } catch (GridCacheEntryRemovedException ignore) {
              if (log.isDebugEnabled())
                log.debug(
                    "Attempted to remove lock on removed entry (will retry) [ver="
                        + lockVer
                        + ", entry="
                        + e
                        + ']');
            }
          }
        }
      }
    }

    cctx.mvcc().recheckPendingLocks();
  }
Example #6
  /**
   * @param nodeId Left node ID.
   * @return {@code True} if node was in the list.
   */
  @SuppressWarnings({"ThrowableInstanceNeverThrown"})
  @Override
  public boolean onNodeLeft(UUID nodeId) {
    boolean found = false;

    for (GridFuture<?> fut : futures()) {
      if (isMini(fut)) {
        MiniFuture f = (MiniFuture) fut;

        if (f.node().id().equals(nodeId)) {
          if (log.isDebugEnabled())
            log.debug(
                "Found mini-future for left node [nodeId="
                    + nodeId
                    + ", mini="
                    + f
                    + ", fut="
                    + this
                    + ']');

          f.onResult(newTopologyException(null, nodeId));

          found = true;
        }
      }
    }

    if (!found) {
      if (log.isDebugEnabled())
        log.debug(
            "Near lock future does not have mapping for left node (ignoring) [nodeId="
                + nodeId
                + ", fut="
                + this
                + ']');
    }

    return found;
  }
Example #7
  /**
   * @param cached Entry to check.
   * @return {@code True} if filter passed.
   */
  private boolean filter(GridCacheEntryEx<K, V> cached) {
    try {
      if (!cctx.isAll(cached, filter)) {
        if (log.isDebugEnabled())
          log.debug("Filter didn't pass for entry (will fail lock): " + cached);

        onFailed(true);

        return false;
      }

      return true;
    } catch (GridException e) {
      onError(e);

      return false;
    }
  }
Example #8
  /**
   * Adds entry to future.
   *
   * @param topVer Topology version.
   * @param entry Entry to add.
   * @param dhtNodeId DHT node ID.
   * @return Lock candidate.
   * @throws GridCacheEntryRemovedException If entry was removed.
   */
  @Nullable
  private GridCacheMvccCandidate<K> addEntry(
      long topVer, GridNearCacheEntry<K, V> entry, UUID dhtNodeId)
      throws GridCacheEntryRemovedException {
    // Check if lock acquisition has timed out.
    if (timedOut) return null;

    // Add local lock first, as it may throw GridCacheEntryRemovedException.
    GridCacheMvccCandidate<K> c =
        entry.addNearLocal(
            dhtNodeId, threadId, lockVer, timeout, !inTx(), inTx(), implicitSingleTx());

    if (inTx()) {
      GridCacheTxEntry<K, V> txEntry = tx.entry(entry.key());

      txEntry.cached(entry, txEntry.keyBytes());
    }

    if (c != null) c.topologyVersion(topVer);

    synchronized (mux) {
      entries.add(entry);
    }

    if (c == null && timeout < 0) {
      if (log.isDebugEnabled()) log.debug("Failed to acquire lock with negative timeout: " + entry);

      onFailed(false);

      return null;
    }

    // Double check if lock acquisition has already timed out.
    if (timedOut) {
      entry.removeLock(lockVer);

      return null;
    }

    return c;
  }
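Note the two reads of timedOut in addEntry(..): one before the candidate is added and one after. The second read closes the race where the timeout fires while addNearLocal(..) is in flight, in which case the just-added lock is removed again. The same check, act, re-check pattern in isolation (hypothetical Resource, Key and register(..) names; a volatile flag is assumed):

  private volatile boolean timedOut; // Flipped asynchronously by a timeout object.

  @Nullable
  private Resource acquire(Key key) {
    if (timedOut)
      return null; // Fast path: already timed out, register nothing.

    Resource res = register(key); // Races with the timeout flipping the flag.

    // Double check: if the timeout fired mid-registration, undo the
    // registration so the half-acquired resource does not leak.
    if (timedOut) {
      res.release();

      return null;
    }

    return res;
  }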
Example #9
  /** {@inheritDoc} */
  @Override
  public boolean onDone(Boolean success, Throwable err) {
    if (log.isDebugEnabled())
      log.debug(
          "Received onDone(..) callback [success="
              + success
              + ", err="
              + err
              + ", fut="
              + this
              + ']');

    // If locks were not acquired yet, delay completion.
    if (isDone() || (err == null && success && !checkLocks())) return false;

    this.err.compareAndSet(null, err instanceof GridCacheLockTimeoutException ? null : err);

    if (err != null) success = false;

    return onComplete(success, true);
  }
Example #10
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps are needed - flush the buffers again.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
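The while (true) in doFlush() exists because a failed buffer flush means entries were remapped into fresh buffers, which must themselves be flushed; the loop exits only once every tracked future has completed. A compressed, hypothetical rendering of that drain loop (flushAllBuffers() and allFuturesDone() are stand-ins, not source methods):

  private void drain() throws GridException {
    while (true) {
      // Flush every buffer; false means some flush future failed, i.e.
      // entries were remapped into new buffers.
      if (!flushAllBuffers())
        continue; // Remapped entries created new buffers - flush again.

      if (allFuturesDone())
        return; // Nothing pending anymore: the flush is complete.
    }
  }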
Example #11
  /**
   * @param ctx Grid kernal context.
   * @param cacheName Cache name.
   * @param flushQ Flush queue.
   */
  public GridDataLoaderImpl(
      final GridKernalContext ctx,
      @Nullable final String cacheName,
      DelayQueue<GridDataLoaderImpl<K, V>> flushQ) {
    assert ctx != null;

    this.ctx = ctx;
    this.cacheName = cacheName;
    this.flushQ = flushQ;

    log = U.logger(ctx, logRef, GridDataLoaderImpl.class);

    discoLsnr =
        new GridLocalEventListener() {
          @Override
          public void onEvent(GridEvent evt) {
            assert evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT;

            GridDiscoveryEvent discoEvt = (GridDiscoveryEvent) evt;

            UUID id = discoEvt.eventNodeId();

            // Remap regular mappings.
            final Buffer buf = bufMappings.remove(id);

            if (buf != null) {
              // Only async notification is possible since
              // discovery thread may be trapped otherwise.
              ctx.closure()
                  .callLocalSafe(
                      new Callable<Object>() {
                        @Override
                        public Object call() throws Exception {
                          buf.onNodeLeft();

                          return null;
                        }
                      },
                      true /* system pool */);
            }
          }
        };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

    // Generate unique topic for this loader.
    topic = TOPIC_DATALOAD.topic(GridUuid.fromUuid(ctx.localNodeId()));

    ctx.io()
        .addMessageListener(
            topic,
            new GridMessageListener() {
              @Override
              public void onMessage(UUID nodeId, Object msg) {
                assert msg instanceof GridDataLoadResponse;

                GridDataLoadResponse res = (GridDataLoadResponse) msg;

                if (log.isDebugEnabled()) log.debug("Received data load response: " + res);

                Buffer buf = bufMappings.get(nodeId);

                if (buf != null) buf.onResponse(res);
                else if (log.isDebugEnabled())
                  log.debug("Ignoring response since node has left [nodeId=" + nodeId + ", ");
              }
            });

    if (log.isDebugEnabled()) log.debug("Added response listener within topic: " + topic);

    fut = new GridDataLoaderFuture(ctx, this);
  }
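The discovery listener in this constructor deliberately never runs buf.onNodeLeft() inline: the event arrives on the discovery thread, which (per the inline comment) could be trapped by that call. A plain-JDK sketch of the same hand-off, with java.util.concurrent.ExecutorService standing in for ctx.closure().callLocalSafe(..):

  private final ExecutorService sysPool = Executors.newCachedThreadPool();

  private void onNodeLeftEvent(final Buffer buf) {
    // Return to the event thread immediately; the potentially blocking
    // cleanup runs on a pool thread instead.
    sysPool.submit(new Runnable() {
      @Override public void run() {
        buf.onNodeLeft();
      }
    });
  }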
Example #12
  /**
   * Gets the next near lock mapping and either acquires DHT locks locally or sends a near lock
   * request to the remote primary node.
   *
   * @param mappings Queue of mappings.
   * @throws GridException If the mapping cannot be completed.
   */
  private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
      throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null) return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

    if (filter != null && filter.length != 0) req.filter(filter, cctx);

    if (node.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (log.isDebugEnabled()) log.debug("Before locally locking near request: " + req);

      GridFuture<GridNearLockResponse<K, V>> fut;

      if (CU.DHT_ENABLED) fut = dht().lockAllAsync(cctx.localNode(), req, filter);
      else {
        // Create dummy values for testing.
        GridNearLockResponse<K, V> res =
            new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

        res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

        fut = new GridFinishedFuture<>(ctx, res);
      }

      // Add new future.
      add(
          new GridEmbeddedFuture<>(
              cctx.kernalContext(),
              fut,
              new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override
                public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                  if (CU.isLockTimeoutOrCancelled(e)
                      || (res != null && CU.isLockTimeoutOrCancelled(res.error()))) return false;

                  if (e != null) {
                    onError(e);

                    return false;
                  }

                  if (res == null) {
                    onError(new GridException("Lock response is null for future: " + this));

                    return false;
                  }

                  if (res.error() != null) {
                    onError(res.error());

                    return false;
                  }

                  if (log.isDebugEnabled())
                    log.debug(
                        "Acquired lock for local DHT mapping [locId="
                            + cctx.nodeId()
                            + ", mappedKeys="
                            + mappedKeys
                            + ", fut="
                            + GridNearLockFuture.this
                            + ']');

                  try {
                    int i = 0;

                    for (K k : mappedKeys) {
                      while (true) {
                        GridNearCacheEntry<K, V> entry =
                            cctx.near().entryExx(k, req.topologyVersion());

                        try {
                          GridTuple3<GridCacheVersion, V, byte[]> oldValTup =
                              valMap.get(entry.key());

                          boolean hasBytes = entry.hasValue();
                          V oldVal = entry.rawGet();
                          V newVal = res.value(i);
                          byte[] newBytes = res.valueBytes(i);

                          GridCacheVersion dhtVer = res.dhtVersion(i);
                          GridCacheVersion mappedVer = res.mappedVersion(i);

                          // On local node don't record twice if DHT cache already recorded.
                          boolean record =
                              retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                          if (newVal == null) {
                            if (oldValTup != null) {
                              if (oldValTup.get1().equals(dhtVer)) {
                                newVal = oldValTup.get2();

                                newBytes = oldValTup.get3();
                              }

                              oldVal = oldValTup.get2();
                            }
                          }

                          // Lock is held at this point, so we can set the
                          // returned value if any.
                          entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                          entry.readyNearLock(
                              lockVer,
                              mappedVer,
                              res.committedVersions(),
                              res.rolledbackVersions(),
                              res.pending());

                          if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                            boolean pass = res.filterResult(i);

                            tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                          }

                          if (record) {
                            if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                              cctx.events()
                                  .addEvent(
                                      entry.partition(),
                                      entry.key(),
                                      tx,
                                      null,
                                      EVT_CACHE_OBJECT_READ,
                                      newVal,
                                      newVal != null,
                                      oldVal,
                                      hasBytes,
                                      CU.subjectId(tx, cctx));

                            cctx.cache().metrics0().onRead(oldVal != null);
                          }

                          if (log.isDebugEnabled())
                            log.debug(
                                "Processed response for entry [res="
                                    + res
                                    + ", entry="
                                    + entry
                                    + ']');

                          break; // Inner while loop.
                        } catch (GridCacheEntryRemovedException ignored) {
                          if (log.isDebugEnabled())
                            log.debug(
                                "Failed to add candidates because entry was "
                                    + "removed (will renew).");

                          // Replace old entry with new one.
                          entries.set(
                              i,
                              (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
                        }
                      }

                      i++; // Increment outside of while loop.
                    }

                    // Proceed and add new future (if any) before completing embedded future.
                    proceedMapping(mappings);
                  } catch (GridException ex) {
                    onError(ex);

                    return false;
                  }

                  return true;
                }
              }));
    } else {
      final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      GridFuture<?> txSync = null;

      if (inTx()) txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

      if (txSync == null || txSync.isDone()) {
        try {
          if (log.isDebugEnabled())
            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

          cctx.io().send(node, req);
        } catch (GridTopologyException ex) {
          assert fut != null;

          fut.onResult(ex);
        }
      } else {
        txSync.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> t) {
                try {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                  cctx.io().send(node, req);
                } catch (GridTopologyException ex) {
                  assert fut != null;

                  fut.onResult(ex);
                } catch (GridException e) {
                  onError(e);
                }
              }
            });
      }
    }
  }
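A structural note on proceedMapping(..): it handles exactly one mapping per call and schedules the next one only from inside the completion path (the nested proceedMapping(mappings) call above), so lock requests for successive groups are issued strictly one after another. The chaining idea in isolation (hypothetical Request, Callback and sendAsync(..) names):

  private void proceedNext(final Queue<Request> queue) {
    final Request req = queue.poll();

    if (req == null)
      return; // No more groups: the overall future may complete.

    sendAsync(req, new Callback() {
      @Override public void onDone() {
        // Chain the next group only after this one finishes, preserving
        // lock-acquisition order across groups.
        proceedNext(queue);
      }
    });
  }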
Example #13
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request
   * per node, as such an approach does not preserve the order of lock acquisition. Instead, keys
   * are split into contiguous groups belonging to one primary node, and locks for these groups
   * are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : " Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
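To make the Javadoc's ordering argument concrete: for keys mapped as [k1 -> A, k2 -> A, k3 -> B, k4 -> A], grouping purely by node would yield two batches (A: [k1, k2, k4], B: [k3]) and could lock k4 before k3, while the contiguous split yields three batches (A: [k1, k2], B: [k3], A: [k4]) and preserves key order. A hedged sketch of that split (hypothetical Node, Mapping and primaryNode(..) names):

  private Deque<Mapping> splitContiguous(Iterable<K> keys) {
    Deque<Mapping> mappings = new ArrayDeque<>();

    Mapping cur = null;

    for (K key : keys) {
      Node primary = primaryNode(key); // Hypothetical affinity lookup.

      // Open a new group whenever the primary changes, even if that node
      // already owns an earlier group.
      if (cur == null || !cur.node().equals(primary)) {
        cur = new Mapping(primary);

        mappings.add(cur);
      }

      cur.add(key);
    }

    return mappings;
  }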