  /**
   * Finishes the transaction on the given node mapping: either invokes the local DHT cache
   * directly or sends a finish request to the remote primary node.
   *
   * @param m Mapping.
   */
  @SuppressWarnings({"unchecked"})
  private void finish(GridDistributedTxMapping<K, V> m) {
    GridRichNode n = m.node();

    assert !m.empty();

    GridNearTxFinishRequest<K, V> req =
        new GridNearTxFinishRequest<K, V>(
            futId,
            tx.xidVersion(),
            tx.commitVersion(),
            tx.threadId(),
            commit,
            tx.isInvalidate(),
            m.explicitLock(),
            tx.topologyVersion(),
            null,
            null,
            null,
            commit && tx.pessimistic() ? m.writes() : null,
            (tx.syncCommit() && commit) || (tx.syncRollback() && !commit));

    // If the local node is the primary node for the keys, finish directly on the DHT cache.
    if (n.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (CU.DHT_ENABLED) {
        GridFuture<GridCacheTx> fut =
            commit ? dht().commitTx(n.id(), req) : dht().rollbackTx(n.id(), req);

        // Add new future.
        add(fut);
      } else
        // Add done future for testing.
        add(new GridFinishedFuture<GridCacheTx>(ctx));
    } else {
      MiniFuture fut = new MiniFuture(m);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      try {
        cctx.io().send(n, req);

        // If we don't wait for result, then mark future as done.
        if (!isSync() && !m.explicitLock()) fut.onDone();
      } catch (GridTopologyException e) {
        // Remove previous mapping.
        mappings.remove(m.node().id());

        fut.onResult(e);
      } catch (GridException e) {
        // Fail the whole thing.
        fut.onResult(e);
      }
    }
  }
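
The method above follows a local-vs-remote dispatch pattern: the same request object is either handed straight to the local DHT cache or serialized to the remote primary, with a mini-future registered up front to track the reply. Below is a minimal, self-contained sketch of that pattern; Node, Request, dispatch() and send() are hypothetical stand-ins for illustration, not GridGain APIs.

import java.util.concurrent.CompletableFuture;

final class DispatchSketch {
  /** Hypothetical stand-ins for GridRichNode and GridNearTxFinishRequest. */
  record Node(boolean local, String id) {}
  record Request(String id) {}

  /** Runs locally when the mapped node is this node; otherwise registers a future, then sends. */
  CompletableFuture<Void> dispatch(Node n, Request req) {
    if (n.local())
      // Local path: invoke the handler directly, no network round-trip.
      return processLocally(req);

    // Remote path: create the tracking future before sending so a failure can complete it.
    CompletableFuture<Void> fut = new CompletableFuture<>();

    try {
      send(n, req); // may fail, e.g. if the node has left the topology
    } catch (RuntimeException e) {
      fut.completeExceptionally(e); // mirrors fut.onResult(e) above
    }

    return fut;
  }

  CompletableFuture<Void> processLocally(Request req) {
    return CompletableFuture.completedFuture(null); // stub for dht().commitTx(...)/rollbackTx(...)
  }

  void send(Node n, Request req) {
    // Network send stub standing in for cctx.io().send(n, req).
  }
}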
Example #2
  /**
   * Gets the next near lock mapping and either acquires DHT locks locally or sends a near lock
   * request to the remote primary node.
   *
   * @param mappings Queue of mappings.
   * @throws GridException If the mapping cannot be completed.
   */
  private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
      throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null) return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

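    // Propagate the optional entry filter so the primary node can evaluate it during locking.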
    if (filter != null && filter.length != 0) req.filter(filter, cctx);

    if (node.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (log.isDebugEnabled()) log.debug("Before locally locking near request: " + req);

      GridFuture<GridNearLockResponse<K, V>> fut;

      if (CU.DHT_ENABLED) fut = dht().lockAllAsync(cctx.localNode(), req, filter);
      else {
        // Create dummy values for testing.
        GridNearLockResponse<K, V> res =
            new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

        res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

        fut = new GridFinishedFuture<>(ctx, res);
      }

      // Add new future.
      add(
          new GridEmbeddedFuture<>(
              cctx.kernalContext(),
              fut,
              new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override
                public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                  if (CU.isLockTimeoutOrCancelled(e)
                      || (res != null && CU.isLockTimeoutOrCancelled(res.error()))) return false;

                  if (e != null) {
                    onError(e);

                    return false;
                  }

                  if (res == null) {
                    onError(new GridException("Lock response is null for future: " + this));

                    return false;
                  }

                  if (res.error() != null) {
                    onError(res.error());

                    return false;
                  }

                  if (log.isDebugEnabled())
                    log.debug(
                        "Acquired lock for local DHT mapping [locId="
                            + cctx.nodeId()
                            + ", mappedKeys="
                            + mappedKeys
                            + ", fut="
                            + GridNearLockFuture.this
                            + ']');

                  try {
                    int i = 0;

                    for (K k : mappedKeys) {
                      while (true) {
                        GridNearCacheEntry<K, V> entry =
                            cctx.near().entryExx(k, req.topologyVersion());

                        try {
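                          // Value tuple (dhtVer, value, value bytes) previously seen for this key, if any.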
                          GridTuple3<GridCacheVersion, V, byte[]> oldValTup =
                              valMap.get(entry.key());

                          boolean hasBytes = entry.hasValue();
                          V oldVal = entry.rawGet();
                          V newVal = res.value(i);
                          byte[] newBytes = res.valueBytes(i);

                          GridCacheVersion dhtVer = res.dhtVersion(i);
                          GridCacheVersion mappedVer = res.mappedVersion(i);

                          // On the local node, don't record the read twice if the DHT cache
                          // already recorded it.
                          boolean record =
                              retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                          if (newVal == null) {
                            if (oldValTup != null) {
                              if (oldValTup.get1().equals(dhtVer)) {
                                newVal = oldValTup.get2();

                                newBytes = oldValTup.get3();
                              }

                              oldVal = oldValTup.get2();
                            }
                          }

                          // Lock is held at this point, so we can set the
                          // returned value if any.
                          entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                          entry.readyNearLock(
                              lockVer,
                              mappedVer,
                              res.committedVersions(),
                              res.rolledbackVersions(),
                              res.pending());

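                          // Record the primary node's filter result so a one-phase commit honors failed filters.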
                          if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                            boolean pass = res.filterResult(i);

                            tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                          }

                          if (record) {
                            if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                              cctx.events()
                                  .addEvent(
                                      entry.partition(),
                                      entry.key(),
                                      tx,
                                      null,
                                      EVT_CACHE_OBJECT_READ,
                                      newVal,
                                      newVal != null,
                                      oldVal,
                                      hasBytes,
                                      CU.subjectId(tx, cctx));

                            cctx.cache().metrics0().onRead(oldVal != null);
                          }

                          if (log.isDebugEnabled())
                            log.debug(
                                "Processed response for entry [res="
                                    + res
                                    + ", entry="
                                    + entry
                                    + ']');

                          break; // Inner while loop.
                        } catch (GridCacheEntryRemovedException ignored) {
                          if (log.isDebugEnabled())
                            log.debug(
                                "Failed to add candidates because entry was "
                                    + "removed (will renew).");

                          // Replace old entry with new one.
                          entries.set(
                              i,
                              (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
                        }
                      }

                      i++; // Increment outside of while loop.
                    }

                    // Proceed and add new future (if any) before completing embedded future.
                    proceedMapping(mappings);
                  } catch (GridException ex) {
                    onError(ex);

                    return false;
                  }

                  return true;
                }
              }));
    } else {
      final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

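      // Within a transaction, wait for the previous finish acknowledgment from this node before sending.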
      GridFuture<?> txSync = null;

      if (inTx()) txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

      if (txSync == null || txSync.isDone()) {
        try {
          if (log.isDebugEnabled())
            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

          cctx.io().send(node, req);
        } catch (GridTopologyException ex) {
          assert fut != null;

          fut.onResult(ex);
        }
      } else {
        txSync.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> t) {
                try {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                  cctx.io().send(node, req);
                } catch (GridTopologyException ex) {
                  assert fut != null;

                  fut.onResult(ex);
                } catch (GridException e) {
                  onError(e);
                }
              }
            });
      }
    }
  }
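
proceedMapping drains the mapping queue one entry at a time: each completed response triggers the next poll, so lock requests are pipelined per mapping instead of being issued all at once. Below is a rough sketch of that drain pattern using a plain Deque and CompletableFuture in place of the GridGain future types; proceed() and process() are hypothetical names.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.concurrent.CompletableFuture;

final class MappingDrainSketch {
  /** Polls the next mapping only after the current one completes, mirroring proceedMapping(). */
  static <T> CompletableFuture<Void> proceed(Deque<T> mappings) {
    T map = mappings.poll();

    // No more mappings to process: complete the chain.
    if (map == null)
      return CompletableFuture.completedFuture(null);

    // process(map) stands in for the local lockAllAsync(...) or remote io().send(...) branch.
    return process(map).thenCompose(ignored -> proceed(mappings));
  }

  static <T> CompletableFuture<Void> process(T map) {
    return CompletableFuture.completedFuture(null); // stub: resolves when the lock response arrives
  }

  public static void main(String[] args) {
    Deque<String> q = new ArrayDeque<>();
    q.add("mapping-1");
    q.add("mapping-2");

    proceed(q).join(); // completes after both mappings are processed in order
  }
}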