Example #1
    /** @param e Node left exception. */
    void onResult(GridTopologyException e) {
      if (isDone()) return;

      if (rcvRes.compareAndSet(false, true)) {
        if (log.isDebugEnabled())
          log.debug(
              "Remote node left grid while sending or waiting for reply (will fail): " + this);

        if (tx != null) tx.removeMapping(node.id());

        // Primary node left the grid, so fail the future.
        GridNearLockFuture.this.onDone(newTopologyException(e, node.id()));

        // Complete this mini future as well.
        onDone(true);
      }
    }
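
The guard to note in Example #1 is rcvRes.compareAndSet(false, true): it makes the
callback idempotent, so the future is failed exactly once even if several threads report
the node-left event concurrently. Below is a minimal, self-contained sketch of that
one-shot pattern; the class and method names are hypothetical, not part of the GridGain API.

import java.util.concurrent.atomic.AtomicBoolean;

public class OneShotCallback {
  private final AtomicBoolean rcvRes = new AtomicBoolean(false);

  void onResult(Exception e) {
    // Only the first caller wins the CAS; later duplicates are ignored.
    if (rcvRes.compareAndSet(false, true))
      System.out.println("Completing once with: " + e);
  }

  public static void main(String[] args) {
    OneShotCallback cb = new OneShotCallback();

    cb.onResult(new RuntimeException("node left")); // completes
    cb.onResult(new RuntimeException("duplicate")); // ignored
  }
}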
Example #2
  /**
   * @param key Key to map.
   * @param mapping Current near lock mapping to add the key to, or {@code null} if none.
   * @param topVer Topology version.
   * @return Near lock mapping.
   * @throws GridException If mapping for key failed.
   */
  private GridNearLockMapping<K, V> map(
      K key, @Nullable GridNearLockMapping<K, V> mapping, long topVer) throws GridException {
    assert mapping == null || mapping.node() != null;

    GridNode primary = cctx.affinity().primary(key, topVer);

    if (cctx.discovery().node(primary.id()) == null)
      // If primary node left the grid before lock acquisition, fail the whole future.
      throw newTopologyException(null, primary.id());

    if (inTx() && tx.groupLock() && !primary.isLocal())
      throw new GridException(
          "Failed to start group lock transaction (local node is not primary for key) [key="
              + key
              + ", primaryNodeId="
              + primary.id()
              + ']');

    if (mapping == null || !primary.id().equals(mapping.node().id()))
      mapping = new GridNearLockMapping<>(primary, key);
    else mapping.addKey(key);

    return mapping;
  }
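
The final branch of map() is the heart of the method: a new mapping is started only when the
key's primary node differs from the previous key's primary, so consecutive keys owned by the
same node coalesce into one group. A minimal sketch of that grouping rule, with plain strings
standing in for keys and node IDs (all names here are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ContiguousGrouping {
  public static void main(String[] args) {
    // Keys and the (simulated) primary node for each one, in iteration order.
    List<String> keys = Arrays.asList("a", "b", "c", "d", "e");
    List<String> primaries = Arrays.asList("n1", "n1", "n2", "n1", "n1");

    List<List<String>> groups = new ArrayList<>();
    String prev = null;

    for (int i = 0; i < keys.size(); i++) {
      String primary = primaries.get(i);

      // Start a new group when the primary changes, mirroring
      // `mapping == null || !primary.id().equals(mapping.node().id())`.
      if (!primary.equals(prev)) groups.add(new ArrayList<>());

      groups.get(groups.size() - 1).add(keys.get(i));

      prev = primary;
    }

    System.out.println(groups); // [[a, b], [c], [d, e]]
  }
}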
Example #3
    void onNodeLeft() {
      assert !isLocNode;
      assert bufMappings.get(node.id()) != this;

      if (log.isDebugEnabled())
        log.debug("Forcibly completing futures (node has left): " + node.id());

      Exception e =
          new GridTopologyException(
              "Failed to wait for request completion (node has left): " + node.id());

      for (GridFutureAdapter<Object> f : reqs.values()) f.onDone(e);

      // Make sure to complete current future.
      GridFutureAdapter<Object> curFut0;

      synchronized (this) {
        curFut0 = curFut;
      }

      curFut0.onDone(e);
    }
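
The pattern in onNodeLeft() is to fail every pending request future with the same topology
exception so no caller blocks forever on a node that is gone. A compact sketch of that sweep,
with CompletableFuture standing in for GridFutureAdapter (a stand-in for illustration only):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class FailPendingOnNodeLeft {
  public static void main(String[] args) {
    ConcurrentMap<Long, CompletableFuture<Object>> reqs = new ConcurrentHashMap<>();

    reqs.put(1L, new CompletableFuture<>());
    reqs.put(2L, new CompletableFuture<>());

    Exception e = new RuntimeException("Failed to wait for request completion (node has left)");

    // Complete every in-flight request future exceptionally, as onNodeLeft() does.
    for (CompletableFuture<Object> f : reqs.values()) f.completeExceptionally(e);

    System.out.println(reqs.get(1L).isCompletedExceptionally()); // true
  }
}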
Example #4
    /** @param node Node. */
    Buffer(GridNode node) {
      assert node != null;

      this.node = node;

      locFuts = new GridConcurrentHashSet<>();
      reqs = new ConcurrentHashMap8<>();

      // Cache local node flag.
      isLocNode = node.equals(ctx.discovery().localNode());

      entries = newEntries();
      curFut = new GridFutureAdapter<>(ctx);
      curFut.listenAsync(signalC);

      sem = new Semaphore(parallelOps);
    }
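
The Semaphore(parallelOps) created at the end of the constructor is what bounds the number of
in-flight batches per node; judging by Example #5, incrementActiveTasks() presumably acquires a
permit and the completion path releases it, though that code is not shown here. A minimal sketch
of that throttling idea (PARALLEL_OPS and the task body are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class ThrottledSubmit {
  private static final int PARALLEL_OPS = 2; // analogous to parallelOps above

  public static void main(String[] args) throws InterruptedException {
    Semaphore sem = new Semaphore(PARALLEL_OPS);
    ExecutorService pool = Executors.newCachedThreadPool();

    for (int i = 0; i < 5; i++) {
      sem.acquire(); // blocks once PARALLEL_OPS tasks are in flight

      int id = i;

      pool.submit(() -> {
        try {
          System.out.println("Running task " + id);
        } finally {
          sem.release(); // free a slot when the task completes
        }
      });
    }

    pool.shutdown();
  }
}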
Example #5
    /**
     * @param entries Entries to submit.
     * @param curFut Current future.
     * @throws GridInterruptedException If interrupted.
     */
    private void submit(final List<Map.Entry<K, V>> entries, final GridFutureAdapter<Object> curFut)
        throws GridInterruptedException {
      assert entries != null;
      assert !entries.isEmpty();
      assert curFut != null;

      incrementActiveTasks();

      GridFuture<Object> fut;

      if (isLocNode) {
        fut =
            ctx.closure()
                .callLocalSafe(
                    new GridDataLoadUpdateJob<>(ctx, log, cacheName, entries, false, updater),
                    false);

        locFuts.add(fut);

        fut.listenAsync(
            new GridInClosure<GridFuture<Object>>() {
              @Override
              public void apply(GridFuture<Object> t) {
                try {
                  boolean rmv = locFuts.remove(t);

                  assert rmv;

                  curFut.onDone(t.get());
                } catch (GridException e) {
                  curFut.onDone(e);
                }
              }
            });
      } else {
        byte[] entriesBytes;

        try {
          entriesBytes = ctx.config().getMarshaller().marshal(entries);

          if (updaterBytes == null) {
            assert updater != null;

            updaterBytes = ctx.config().getMarshaller().marshal(updater);
          }

          if (topicBytes == null) topicBytes = ctx.config().getMarshaller().marshal(topic);
        } catch (GridException e) {
          U.error(log, "Failed to marshal (request will not be sent).", e);

          return;
        }

        GridDeployment dep = null;
        GridPeerDeployAware jobPda0 = null;

        if (ctx.deploy().enabled()) {
          try {
            jobPda0 = jobPda;

            assert jobPda0 != null;

            dep = ctx.deploy().deploy(jobPda0.deployClass(), jobPda0.classLoader());
          } catch (GridException e) {
            U.error(
                log,
                "Failed to deploy class (request will not be sent): " + jobPda0.deployClass(),
                e);

            return;
          }

          if (dep == null)
            U.warn(log, "Failed to deploy class (request will be sent): " + jobPda0.deployClass());
        }

        long reqId = idGen.incrementAndGet();

        fut = curFut;

        reqs.put(reqId, (GridFutureAdapter<Object>) fut);

        GridDataLoadRequest<Object, Object> req =
            new GridDataLoadRequest<>(
                reqId,
                topicBytes,
                cacheName,
                updaterBytes,
                entriesBytes,
                true,
                dep != null ? dep.deployMode() : null,
                dep != null ? jobPda0.deployClass().getName() : null,
                dep != null ? dep.userVersion() : null,
                dep != null ? dep.participants() : null,
                dep != null ? dep.classLoaderId() : null,
                dep == null);

        try {
          ctx.io().send(node, TOPIC_DATALOAD, req, PUBLIC_POOL);

          if (log.isDebugEnabled())
            log.debug("Sent request to node [nodeId=" + node.id() + ", req=" + req + ']');
        } catch (GridException e) {
          if (ctx.discovery().alive(node) && ctx.discovery().pingNode(node.id()))
            ((GridFutureAdapter<Object>) fut).onDone(e);
          else
            ((GridFutureAdapter<Object>) fut)
                .onDone(
                    new GridTopologyException(
                        "Failed to send request (node has left): " + node.id()));
        }
      }
    }
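
On the local path of submit(), the listener registered with listenAsync() bridges fut to curFut:
the closure propagates either the result or the exception downstream. The same bridge, sketched
with CompletableFuture in place of GridFuture/GridFutureAdapter (a stand-in, not the actual API):

import java.util.concurrent.CompletableFuture;

public class FutureBridge {
  public static void main(String[] args) {
    // Upstream future, e.g. the local closure execution.
    CompletableFuture<Object> fut = new CompletableFuture<>();

    // Downstream future the caller waits on, analogous to curFut.
    CompletableFuture<Object> curFut = new CompletableFuture<>();

    // Equivalent of fut.listenAsync(...): propagate result or error.
    fut.whenComplete((res, err) -> {
      if (err != null) curFut.completeExceptionally(err);
      else curFut.complete(res);
    });

    fut.complete("ok");

    System.out.println(curFut.join()); // ok
  }
}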
Example #6
  /**
   * Gets the next near lock mapping and either acquires DHT locks locally or sends a near lock
   * request to the remote primary node.
   *
   * @param mappings Queue of mappings.
   * @throws GridException If mapping cannot be completed.
   */
  private void proceedMapping(final ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings)
      throws GridException {
    GridNearLockMapping<K, V> map = mappings.poll();

    // If there are no more mappings to process, complete the future.
    if (map == null) return;

    final GridNearLockRequest<K, V> req = map.request();
    final Collection<K> mappedKeys = map.distributedKeys();
    final GridNode node = map.node();

    if (filter != null && filter.length != 0) req.filter(filter, cctx);

    if (node.isLocal()) {
      req.miniId(GridUuid.randomUuid());

      if (log.isDebugEnabled()) log.debug("Before locally locking near request: " + req);

      GridFuture<GridNearLockResponse<K, V>> fut;

      if (CU.DHT_ENABLED) fut = dht().lockAllAsync(cctx.localNode(), req, filter);
      else {
        // Create dummy values for testing.
        GridNearLockResponse<K, V> res =
            new GridNearLockResponse<>(lockVer, futId, null, false, 1, null);

        res.addValueBytes(null, null, true, lockVer, lockVer, cctx);

        fut = new GridFinishedFuture<>(ctx, res);
      }

      // Add new future.
      add(
          new GridEmbeddedFuture<>(
              cctx.kernalContext(),
              fut,
              new C2<GridNearLockResponse<K, V>, Exception, Boolean>() {
                @Override
                public Boolean apply(GridNearLockResponse<K, V> res, Exception e) {
                  if (CU.isLockTimeoutOrCancelled(e)
                      || (res != null && CU.isLockTimeoutOrCancelled(res.error()))) return false;

                  if (e != null) {
                    onError(e);

                    return false;
                  }

                  if (res == null) {
                    onError(new GridException("Lock response is null for future: " + this));

                    return false;
                  }

                  if (res.error() != null) {
                    onError(res.error());

                    return false;
                  }

                  if (log.isDebugEnabled())
                    log.debug(
                        "Acquired lock for local DHT mapping [locId="
                            + cctx.nodeId()
                            + ", mappedKeys="
                            + mappedKeys
                            + ", fut="
                            + GridNearLockFuture.this
                            + ']');

                  try {
                    int i = 0;

                    for (K k : mappedKeys) {
                      while (true) {
                        GridNearCacheEntry<K, V> entry =
                            cctx.near().entryExx(k, req.topologyVersion());

                        try {
                          GridTuple3<GridCacheVersion, V, byte[]> oldValTup =
                              valMap.get(entry.key());

                          boolean hasBytes = entry.hasValue();
                          V oldVal = entry.rawGet();
                          V newVal = res.value(i);
                          byte[] newBytes = res.valueBytes(i);

                          GridCacheVersion dhtVer = res.dhtVersion(i);
                          GridCacheVersion mappedVer = res.mappedVersion(i);

                          // On local node don't record twice if DHT cache already recorded.
                          boolean record =
                              retval && oldValTup != null && oldValTup.get1().equals(dhtVer);

                          if (newVal == null) {
                            if (oldValTup != null) {
                              if (oldValTup.get1().equals(dhtVer)) {
                                newVal = oldValTup.get2();

                                newBytes = oldValTup.get3();
                              }

                              oldVal = oldValTup.get2();
                            }
                          }

                          // Lock is held at this point, so we can set the
                          // returned value if any.
                          entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

                          entry.readyNearLock(
                              lockVer,
                              mappedVer,
                              res.committedVersions(),
                              res.rolledbackVersions(),
                              res.pending());

                          if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                            boolean pass = res.filterResult(i);

                            tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
                          }

                          if (record) {
                            if (cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
                              cctx.events()
                                  .addEvent(
                                      entry.partition(),
                                      entry.key(),
                                      tx,
                                      null,
                                      EVT_CACHE_OBJECT_READ,
                                      newVal,
                                      newVal != null,
                                      oldVal,
                                      hasBytes,
                                      CU.subjectId(tx, cctx));

                            cctx.cache().metrics0().onRead(oldVal != null);
                          }

                          if (log.isDebugEnabled())
                            log.debug(
                                "Processed response for entry [res="
                                    + res
                                    + ", entry="
                                    + entry
                                    + ']');

                          break; // Inner while loop.
                        } catch (GridCacheEntryRemovedException ignored) {
                          if (log.isDebugEnabled())
                            log.debug(
                                "Failed to add candidates because entry was "
                                    + "removed (will renew).");

                          // Replace old entry with new one.
                          entries.set(
                              i,
                              (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
                        }
                      }

                      i++; // Increment outside of while loop.
                    }

                    // Proceed and add new future (if any) before completing embedded future.
                    proceedMapping(mappings);
                  } catch (GridException ex) {
                    onError(ex);

                    return false;
                  }

                  return true;
                }
              }));
    } else {
      final MiniFuture fut = new MiniFuture(node, mappedKeys, mappings);

      req.miniId(fut.futureId());

      add(fut); // Append new future.

      GridFuture<?> txSync = null;

      if (inTx()) txSync = cctx.tm().awaitFinishAckAsync(node.id(), tx.threadId());

      if (txSync == null || txSync.isDone()) {
        try {
          if (log.isDebugEnabled())
            log.debug("Sending near lock request [node=" + node.id() + ", req=" + req + ']');

          cctx.io().send(node, req);
        } catch (GridTopologyException ex) {
          assert fut != null;

          fut.onResult(ex);
        }
      } else {
        txSync.listenAsync(
            new CI1<GridFuture<?>>() {
              @Override
              public void apply(GridFuture<?> t) {
                try {
                  if (log.isDebugEnabled())
                    log.debug(
                        "Sending near lock request [node=" + node.id() + ", req=" + req + ']');

                  cctx.io().send(node, req);
                } catch (GridTopologyException ex) {
                  assert fut != null;

                  fut.onResult(ex);
                } catch (GridException e) {
                  onError(e);
                }
              }
            });
      }
    }
  }
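
The control flow of proceedMapping() deserves a note: one mapping is polled per call, and the
next poll happens only from inside the completion callback of the previous request, which is
what serializes lock acquisition across groups. A stripped-down sketch of that completion-driven
loop (CompletableFuture and the string payloads are stand-ins):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.concurrent.CompletableFuture;

public class SequentialMappings {
  // Poll one mapping, process it asynchronously, and only then recurse to the next,
  // mirroring how proceedMapping() is re-invoked from response handlers.
  static void proceed(Deque<String> mappings) {
    String map = mappings.poll();

    if (map == null) return; // no more mappings to process

    CompletableFuture
        .supplyAsync(() -> "locked " + map) // stand-in for the lock request/response
        .thenAccept(res -> {
          System.out.println(res);

          proceed(mappings); // pull the next mapping only after this one completes
        });
  }

  public static void main(String[] args) throws InterruptedException {
    Deque<String> mappings = new ArrayDeque<>(Arrays.asList("m1", "m2", "m3"));

    proceed(mappings);

    Thread.sleep(500); // demo only: let the async chain finish before exiting
  }
}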
Example #7
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by nodes and send one lock request
   * per node, as such an approach does not preserve the order of lock acquisition. Instead, keys
   * are split into contiguous groups belonging to one primary node, and locks for these groups
   * are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : "Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
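
A recurring idiom in map() (and in the response handlers of Examples #6 and #9) is the
while (true) loop around entry operations: if the entry is concurrently removed, the
GridCacheEntryRemovedException is caught, a fresh entry is obtained, and the operation is
retried. A tiny self-contained sketch of that retry shape; the exception class and lockEntry()
are fabricated for illustration:

public class RetryOnRemoved {
  // Stand-in for GridCacheEntryRemovedException.
  static class EntryRemovedException extends Exception {}

  static int attempts;

  // Simulated entry operation that fails once with a removed-entry error.
  static String lockEntry(String key) throws EntryRemovedException {
    if (attempts++ == 0) throw new EntryRemovedException();

    return "locked " + key;
  }

  public static void main(String[] args) {
    // The while (true) retry loop used in map(): on a removed entry, renew and retry;
    // break once the operation succeeds.
    while (true) {
      try {
        System.out.println(lockEntry("k1"));

        break;
      } catch (EntryRemovedException ignored) {
        System.out.println("Entry was removed, renewing and retrying...");
      }
    }
  }
}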
Example #8
  /** {@inheritDoc} */
  @Override
  public String toString() {
    return S.toString(MiniFuture.class, this, "node", node.id(), "super", super.toString());
  }
Example #9
    /** @param res Lock response. */
    void onResult(GridNearLockResponse<K, V> res) {
      if (rcvRes.compareAndSet(false, true)) {
        if (res.error() != null) {
          if (log.isDebugEnabled())
            log.debug(
                "Finishing mini future with an error due to error in response [miniFut="
                    + this
                    + ", res="
                    + res
                    + ']');

          // Fail.
          if (res.error() instanceof GridCacheLockTimeoutException) onDone(false);
          else onDone(res.error());

          return;
        }

        int i = 0;

        long topVer = topSnapshot.get().topologyVersion();

        for (K k : keys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, topVer);

            try {
              if (res.dhtVersion(i) == null) {
                onDone(
                    new GridException(
                        "Failed to receive DHT version from remote node (will fail the lock): "
                            + res));

                return;
              }

              GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

              V oldVal = entry.rawGet();
              boolean hasOldVal = false;
              V newVal = res.value(i);
              byte[] newBytes = res.valueBytes(i);

              boolean readRecordable = false;

              if (retval) {
                readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ);

                if (readRecordable) hasOldVal = entry.hasValue();
              }

              GridCacheVersion dhtVer = res.dhtVersion(i);
              GridCacheVersion mappedVer = res.mappedVersion(i);

              if (newVal == null) {
                if (oldValTup != null) {
                  if (oldValTup.get1().equals(dhtVer)) {
                    newVal = oldValTup.get2();

                    newBytes = oldValTup.get3();
                  }

                  oldVal = oldValTup.get2();
                }
              }

              // Lock is held at this point, so we can set the
              // returned value if any.
              entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

              if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                boolean pass = res.filterResult(i);

                tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
              }

              entry.readyNearLock(
                  lockVer,
                  mappedVer,
                  res.committedVersions(),
                  res.rolledbackVersions(),
                  res.pending());

              if (retval) {
                if (readRecordable)
                  cctx.events()
                      .addEvent(
                          entry.partition(),
                          entry.key(),
                          tx,
                          null,
                          EVT_CACHE_OBJECT_READ,
                          newVal,
                          newVal != null || newBytes != null,
                          oldVal,
                          hasOldVal,
                          CU.subjectId(tx, cctx));

                cctx.cache().metrics0().onRead(false);
              }

              if (log.isDebugEnabled())
                log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');

              break; // Inner while loop.
            } catch (GridCacheEntryRemovedException ignored) {
              if (log.isDebugEnabled())
                log.debug("Failed to add candidates because entry was removed (will renew).");

              // Replace old entry with new one.
              entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
            } catch (GridException e) {
              onDone(e);

              return;
            }
          }

          i++;
        }

        try {
          proceedMapping(mappings);
        } catch (GridException e) {
          onDone(e);
        }

        onDone(true);
      }
    }