Example No. 1
  /** {@inheritDoc} */
  @Override
  public GridFuture<Boolean> loadMissing(
      boolean async, final Collection<? extends K> keys, final GridInClosure2<K, V> closure) {
    GridFuture<Map<K, V>> f = cctx.near().txLoadAsync(this, keys, CU.<K, V>empty());

    return new GridEmbeddedFuture<Boolean, Map<K, V>>(
        cctx.kernalContext(),
        f,
        new C2<Map<K, V>, Exception, Boolean>() {
          @Override
          public Boolean apply(Map<K, V> map, Exception e) {
            if (e != null) {
              setRollbackOnly();

              throw new GridClosureException(e);
            }

            // Must loop through keys, not map entries,
            // as map entries may not have all the keys.
            for (K key : keys) closure.apply(key, map.get(key));

            return true;
          }
        });
  }
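
The comment inside apply() is the key detail: the loaded map may be missing some of the
requested keys, so the closure is driven by the key collection rather than the map's entry set
and sees null for every miss. A minimal, self-contained sketch of that pattern (hypothetical
names, plain java.util types instead of the Grid* classes):

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;

public class LoadMissingSketch {
  /** Applies the closure once per requested key; keys absent from the loaded map yield null. */
  static <K, V> void applyLoaded(Collection<K> keys, Map<K, V> loaded, BiConsumer<K, V> closure) {
    for (K key : keys)
      closure.accept(key, loaded.get(key)); // null signals "still missing" to the caller.
  }

  public static void main(String[] args) {
    Map<String, Integer> loaded = new HashMap<>();
    loaded.put("a", 1); // "b" was not found in the store.

    applyLoaded(Arrays.asList("a", "b"), loaded,
        (k, v) -> System.out.println(k + " -> " + v)); // a -> 1, b -> null
  }
}
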
  /**
   * @param cctx Context.
   * @param id Partition ID.
   */
  @SuppressWarnings("ExternalizableWithoutPublicNoArgConstructor")
  GridDhtLocalPartition(GridCacheContext cctx, int id) {
    assert cctx != null;

    this.id = id;
    this.cctx = cctx;

    log = U.logger(cctx.kernalContext(), logRef, this);

    rent =
        new GridFutureAdapter<Object>() {
          @Override
          public String toString() {
            return "PartitionRentFuture [part=" + GridDhtLocalPartition.this + ", map=" + map + ']';
          }
        };

    map = new ConcurrentHashMap8<>(cctx.config().getStartSize() / cctx.affinity().partitions());

    int delQueueSize =
        CU.isSystemCache(cctx.name())
            ? 100
            : Math.max(MAX_DELETE_QUEUE_SIZE / cctx.affinity().partitions(), 20);

    rmvQueue = new GridCircularBuffer<>(U.ceilPow2(delQueueSize));
  }
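
The constructor sizes the per-partition removal queue by splitting a global budget across
partitions (with a floor of 20), using a fixed 100 for system caches, and rounding the result up
to a power of two. A small sketch of that arithmetic, assuming U.ceilPow2 is a plain
round-up-to-power-of-two helper:

public class DeleteQueueSizing {
  /** Rounds v up to the nearest power of two (what the U.ceilPow2 call above is assumed to do). */
  static int ceilPow2(int v) {
    return v <= 1 ? 1 : Integer.highestOneBit(v - 1) << 1;
  }

  /** Per-partition delete queue size: total budget split across partitions, floored at 20. */
  static int deleteQueueSize(int maxDeleteQueueSize, int partitions, boolean systemCache) {
    int size = systemCache ? 100 : Math.max(maxDeleteQueueSize / partitions, 20);

    return ceilPow2(size); // Circular buffers are typically sized to powers of two.
  }

  public static void main(String[] args) {
    System.out.println(deleteQueueSize(200_000, 1024, false)); // 256 (195 rounded up)
    System.out.println(deleteQueueSize(200_000, 1024, true));  // 128 (100 rounded up)
  }
}
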
    /** {@inheritDoc} */
    @Override
    public Integer call() throws Exception {
      GridCacheTx tx = CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

      try {
        GridCacheCountDownLatchValue latchVal = latchView.get(key);

        if (latchVal == null) {
          if (log.isDebugEnabled())
            log.debug("Failed to find count down latch with given name: " + name);

          assert cnt == 0;

          return cnt;
        }

        int retVal;

        if (val > 0) {
          retVal = latchVal.get() - val;

          if (retVal < 0) retVal = 0;
        } else retVal = 0;

        latchVal.set(retVal);

        latchView.put(key, latchVal);

        tx.commit();

        return retVal;
      } finally {
        tx.end();
      }
    }
  /**
   * @param entry Transaction entry.
   * @param nodes Nodes.
   */
  private void map(GridCacheTxEntry<K, V> entry, Collection<GridRichNode> nodes) {
    GridRichNode primary = CU.primary0(cctx.affinity(entry.key(), nodes));

    GridDistributedTxMapping<K, V> t = mappings.get(primary.id());

    if (t == null) mappings.put(primary.id(), t = new GridDistributedTxMapping<K, V>(primary));

    t.add(entry);
  }
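
map() uses the usual get-or-create idiom: look up the mapping for the entry's primary node,
create and register it if absent, then append the entry. A self-contained sketch of the same
idiom with hypothetical stand-in types (not the GridDistributedTxMapping API):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class TxMappingSketch {
  /** Minimal stand-in for a per-node transaction mapping. */
  static class NodeMapping {
    final UUID nodeId;
    final List<String> entries = new ArrayList<>();

    NodeMapping(UUID nodeId) {
      this.nodeId = nodeId;
    }
  }

  private final Map<UUID, NodeMapping> mappings = new HashMap<>();

  /** Get-or-create the mapping for the entry's primary node, then append the entry to it. */
  void map(String entry, UUID primaryNodeId) {
    mappings.computeIfAbsent(primaryNodeId, NodeMapping::new).entries.add(entry);
  }
}
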
  /**
   * @param cacheId Cache ID.
   * @return {@code True} if local client has been added.
   */
  public boolean isLocalClientAdded(int cacheId) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && F.eq(req.initiatingNodeId(), cctx.localNodeId())) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    return false;
  }
  /** {@inheritDoc} */
  @Override
  public void writeExternal(ObjectOutput out) throws IOException {
    GridUtils.writeUuid(out, nodeId);

    CU.writeVersion(out, ver);

    out.writeLong(timeout);
    out.writeLong(threadId);
    out.writeLong(id);
    out.writeShort(flags());
  }
  /**
   * @param cacheCtx Cache context.
   * @return {@code True} if local node can calculate affinity on its own for this partition map
   *     exchange.
   */
  private boolean canCalculateAffinity(GridCacheContext cacheCtx) {
    AffinityFunction affFunc = cacheCtx.config().getAffinity();

    // Do not request affinity from remote nodes if affinity function is not centralized.
    if (!U.hasAnnotation(affFunc, AffinityCentralizedFunction.class)) return true;

    // If local node did not initiate exchange or local node is the only cache node in grid.
    Collection<ClusterNode> affNodes = CU.affinityNodes(cacheCtx, exchId.topologyVersion());

    return !exchId.nodeId().equals(cctx.localNodeId())
        || (affNodes.size() == 1 && affNodes.contains(cctx.localNode()));
  }
  /**
   * @param cacheCtx Cache context.
   * @throws IgniteCheckedException If failed.
   */
  private void initTopology(GridCacheContext cacheCtx) throws IgniteCheckedException {
    if (stopping(cacheCtx.cacheId())) return;

    if (canCalculateAffinity(cacheCtx)) {
      if (log.isDebugEnabled())
        log.debug(
            "Will recalculate affinity [locNodeId="
                + cctx.localNodeId()
                + ", exchId="
                + exchId
                + ']');

      cacheCtx.affinity().calculateAffinity(exchId.topologyVersion(), discoEvt);
    } else {
      if (log.isDebugEnabled())
        log.debug(
            "Will request affinity from remote node [locNodeId="
                + cctx.localNodeId()
                + ", exchId="
                + exchId
                + ']');

      // Fetch affinity assignment from remote node.
      GridDhtAssignmentFetchFuture fetchFut =
          new GridDhtAssignmentFetchFuture(
              cacheCtx,
              exchId.topologyVersion(),
              CU.affinityNodes(cacheCtx, exchId.topologyVersion()));

      fetchFut.init();

      List<List<ClusterNode>> affAssignment = fetchFut.get();

      if (log.isDebugEnabled())
        log.debug(
            "Fetched affinity from remote node, initializing affinity assignment [locNodeId="
                + cctx.localNodeId()
                + ", topVer="
                + exchId.topologyVersion()
                + ']');

      if (affAssignment == null) {
        affAssignment = new ArrayList<>(cacheCtx.affinity().partitions());

        List<ClusterNode> empty = Collections.emptyList();

        for (int i = 0; i < cacheCtx.affinity().partitions(); i++) affAssignment.add(empty);
      }

      cacheCtx.affinity().initializeAffinity(exchId.topologyVersion(), affAssignment);
    }
  }
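
When the remote fetch yields no assignment, initTopology() falls back to an assignment that has
one empty owner list per partition, reusing a single immutable empty list. A self-contained
sketch of that fallback:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class EmptyAssignmentSketch {
  /** Builds a placeholder affinity assignment: one empty owner list per partition. */
  static <N> List<List<N>> emptyAssignment(int partitions) {
    List<List<N>> assignment = new ArrayList<>(partitions);

    List<N> empty = Collections.emptyList();

    for (int i = 0; i < partitions; i++)
      assignment.add(empty); // Sharing the immutable empty list across partitions is safe.

    return assignment;
  }
}
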
  /**
   * @param reads Read entries.
   * @param writes Write entries.
   */
  @SuppressWarnings({"unchecked"})
  private void prepare(
      Iterable<GridCacheTxEntry<K, V>> reads, Iterable<GridCacheTxEntry<K, V>> writes) {
    Collection<GridRichNode> nodes = CU.allNodes(cctx);

    // Assign keys to primary nodes.
    for (GridCacheTxEntry<K, V> read : reads) map(read, nodes);

    for (GridCacheTxEntry<K, V> write : writes) map(write, nodes);

    // Create mini futures.
    for (GridDistributedTxMapping<K, V> m : mappings.values()) finish(m);
  }
  /**
   * @param cacheId Cache ID to check.
   * @param topVer Topology version.
   * @return {@code True} if cache was added during this exchange.
   */
  public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && !req.clientStartOnly()) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer);
  }
  /**
   * @param cctx Context.
   * @param tx Transaction.
   * @param failedNodeId ID of the failed node that started the transaction.
   */
  @SuppressWarnings("ConstantConditions")
  public GridCachePessimisticCheckCommittedTxFuture(
      GridCacheContext<K, V> cctx, GridCacheTxEx<K, V> tx, UUID failedNodeId) {
    super(cctx.kernalContext(), new SingleReducer<K, V>());

    this.cctx = cctx;
    this.tx = tx;
    this.failedNodeId = failedNodeId;

    log = U.logger(ctx, logRef, GridCachePessimisticCheckCommittedTxFuture.class);

    nodes = new GridLeanMap<>();

    for (GridNode node : CU.allNodes(cctx, tx.topologyVersion())) nodes.put(node.id(), node);
  }
  /**
   * Updates partition map in all caches.
   *
   * @param msg Partitions full messages.
   */
  private void updatePartitionFullMap(GridDhtPartitionsFullMessage msg) {
    for (Map.Entry<Integer, GridDhtPartitionFullMap> entry : msg.partitions().entrySet()) {
      Integer cacheId = entry.getKey();

      GridCacheContext cacheCtx = cctx.cacheContext(cacheId);

      if (cacheCtx != null) cacheCtx.topology().update(exchId, entry.getValue());
      else {
        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, AffinityTopologyVersion.NONE);

        if (oldest != null && oldest.isLocal())
          cctx.exchange().clientTopology(cacheId, this).update(exchId, entry.getValue());
      }
    }
  }
  /**
   * @param cacheId Cache ID to check.
   * @return {@code True} if cache is being stopped by this exchange.
   */
  private boolean stopping(int cacheId) {
    boolean stopping = false;

    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (cacheId == CU.cacheId(req.cacheName())) {
          stopping = req.stop();

          break;
        }
      }
    }

    return stopping;
  }
  /** @throws GridException If operation failed. */
  private void initializeLatch() throws GridException {
    if (initGuard.compareAndSet(false, true)) {
      try {
        internalLatch =
            CU.outTx(
                new Callable<CountDownLatch>() {
                  @Override
                  public CountDownLatch call() throws Exception {
                    GridCacheTx tx =
                        CU.txStartInternal(ctx, latchView, PESSIMISTIC, REPEATABLE_READ);

                    try {
                      GridCacheCountDownLatchValue val = latchView.get(key);

                      if (val == null) {
                        if (log.isDebugEnabled())
                          log.debug("Failed to find count down latch with given name: " + name);

                        assert cnt == 0;

                        return new CountDownLatch(cnt);
                      }

                      tx.commit();

                      return new CountDownLatch(val.get());
                    } finally {
                      tx.end();
                    }
                  }
                },
                ctx);

        if (log.isDebugEnabled()) log.debug("Initialized internal latch: " + internalLatch);
      } finally {
        initLatch.countDown();
      }
    } else {
      try {
        initLatch.await();
      } catch (InterruptedException ignored) {
        throw new GridException("Thread has been interrupted.");
      }

      if (internalLatch == null)
        throw new GridException("Internal latch has not been properly initialized.");
    }
  }
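
initializeLatch() is an instance of a common one-shot initialization pattern: a CAS guard elects
the single initializing thread, a latch makes every other caller wait, and the latch is released
in a finally block so waiters never hang even if initialization fails. A generic, self-contained
sketch of that pattern (hypothetical class, not the original API):

import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class InitGuardSketch<T> {
  private final AtomicBoolean initGuard = new AtomicBoolean();
  private final CountDownLatch initLatch = new CountDownLatch(1);

  private volatile T value;

  /** Exactly one caller runs the initializer; all others block until it has finished. */
  T getOrInit(Callable<T> initializer) throws Exception {
    if (initGuard.compareAndSet(false, true)) {
      try {
        value = initializer.call();
      } finally {
        initLatch.countDown(); // Release waiters even if initialization failed.
      }
    } else {
      initLatch.await();

      if (value == null)
        throw new IllegalStateException("Initialization failed in another thread.");
    }

    return value;
  }
}
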
  /** {@inheritDoc} */
  @Override
  public boolean onDone(AffinityTopologyVersion res, Throwable err) {
    Map<Integer, Boolean> m = null;

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
      if (cacheCtx.config().getTopologyValidator() != null && !CU.isSystemCache(cacheCtx.name())) {
        if (m == null) m = new HashMap<>();

        m.put(
            cacheCtx.cacheId(),
            cacheCtx.config().getTopologyValidator().validate(discoEvt.topologyNodes()));
      }
    }

    cacheValidRes = m != null ? m : Collections.<Integer, Boolean>emptyMap();

    cctx.cache().onExchangeDone(exchId.topologyVersion(), reqs, err);

    cctx.exchange().onExchangeDone(this, err);

    if (super.onDone(res, err) && !dummy && !forcePreload) {
      if (log.isDebugEnabled())
        log.debug(
            "Completed partition exchange [localNode="
                + cctx.localNodeId()
                + ", exchange= "
                + this
                + ']');

      initFut.onDone(err == null);

      GridTimeoutObject timeoutObj = this.timeoutObj;

      // Deschedule timeout object.
      if (timeoutObj != null) cctx.kernalContext().timeout().removeTimeoutObject(timeoutObj);

      if (exchId.isLeft()) {
        for (GridCacheContext cacheCtx : cctx.cacheContexts())
          cacheCtx.config().getAffinity().removeNode(exchId.nodeId());
      }

      return true;
    }

    return dummy;
  }
  /** {@inheritDoc} */
  @Override
  public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    nodeId = GridUtils.readUuid(in);

    ver = CU.readVersion(in);

    timeout = in.readLong();
    threadId = in.readLong();
    id = in.readLong();

    short flags = in.readShort();

    mask(OWNER, OWNER.get(flags));
    mask(USED, USED.get(flags));
    mask(TX, TX.get(flags));

    ts = U.currentTimeMillis();
  }
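
writeExternal/readExternal round-trip the candidate's boolean state through a single short:
flags() packs the bits on write, and each mask extracts its own bit on read before mask(...)
re-applies it. A minimal sketch of that packing with made-up bit positions (the real OWNER, USED
and TX constants define their own masks):

public class LockFlagsSketch {
  // Hypothetical flag bits for illustration only.
  static final short OWNER = 1 << 0;
  static final short USED = 1 << 1;
  static final short TX = 1 << 2;

  /** Packs the three booleans into one short for serialization. */
  static short pack(boolean owner, boolean used, boolean tx) {
    short flags = 0;

    if (owner) flags |= OWNER;
    if (used) flags |= USED;
    if (tx) flags |= TX;

    return flags;
  }

  /** Reads one flag back out of the packed short. */
  static boolean get(short flags, short mask) {
    return (flags & mask) != 0;
  }

  public static void main(String[] args) {
    short flags = pack(true, false, true);

    System.out.println(get(flags, OWNER) + " " + get(flags, USED) + " " + get(flags, TX)); // true false true
  }
}
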
Example No. 17
  /**
   * @param cctx Registry.
   * @param keys Keys to lock.
   * @param tx Transaction.
   * @param read Read flag.
   * @param retval Flag to return value or not.
   * @param timeout Lock acquisition timeout.
   * @param filter Filter.
   */
  public GridNearLockFuture(
      GridCacheContext<K, V> cctx,
      Collection<? extends K> keys,
      @Nullable GridNearTxLocal<K, V> tx,
      boolean read,
      boolean retval,
      long timeout,
      GridPredicate<GridCacheEntry<K, V>>[] filter) {
    super(cctx.kernalContext(), CU.boolReducer());
    assert cctx != null;
    assert keys != null;

    this.cctx = cctx;
    this.keys = keys;
    this.tx = tx;
    this.read = read;
    this.retval = retval;
    this.timeout = timeout;
    this.filter = filter;

    threadId = tx == null ? Thread.currentThread().getId() : tx.threadId();

    lockVer = tx != null ? tx.xidVersion() : cctx.versions().next();

    futId = GridUuid.randomUuid();

    entries = new ArrayList<>(keys.size());

    log = U.logger(ctx, logRef, GridNearLockFuture.class);

    if (timeout > 0) {
      timeoutObj = new LockTimeoutObject();

      cctx.time().addTimeoutObject(timeoutObj);
    }

    valMap = new ConcurrentHashMap8<>(keys.size(), 1f);
  }
  /** Initializes future. */
  public void prepare() {
    if (log.isDebugEnabled())
      log.debug("Checking if transaction was committed on remote nodes: " + tx);

    // Check local node first (local node can be a backup node for some part of this transaction).
    long originatingThreadId = tx.threadId();

    if (tx instanceof GridCacheTxRemoteEx)
      originatingThreadId = ((GridCacheTxRemoteEx) tx).remoteThreadId();

    GridCacheCommittedTxInfo<K, V> txInfo =
        cctx.tm().txCommitted(tx.nearXidVersion(), tx.eventNodeId(), originatingThreadId);

    if (txInfo != null) {
      onDone(txInfo);

      markInitialized();

      return;
    }

    Collection<GridNode> checkNodes = CU.remoteNodes(cctx, tx.topologyVersion());

    if (tx instanceof GridDhtTxRemote) {
      // If we got primary node failure and near node has not failed.
      if (tx.nodeId().equals(failedNodeId) && !tx.eventNodeId().equals(failedNodeId)) {
        nearCheck = true;

        GridNode nearNode = cctx.discovery().node(tx.eventNodeId());

        if (nearNode == null) {
          // Near node failed, separate check prepared future will take care of it.
          onDone(
              new GridTopologyException(
                  "Failed to check near transaction state (near node left grid): "
                      + tx.eventNodeId()));

          return;
        }

        checkNodes = Collections.singletonList(nearNode);
      }
    }

    for (GridNode rmtNode : checkNodes) {
      // Skip left nodes and local node.
      if (rmtNode.id().equals(failedNodeId)) continue;

      /*
       * Send message to all cache nodes in the topology.
       */

      MiniFuture fut = new MiniFuture(rmtNode.id());

      GridCachePessimisticCheckCommittedTxRequest<K, V> req =
          new GridCachePessimisticCheckCommittedTxRequest<>(
              tx, originatingThreadId, futureId(), fut.futureId());

      add(fut);

      try {
        cctx.io().send(rmtNode.id(), req);
      } catch (GridTopologyException ignored) {
        fut.onNodeLeft();
      } catch (GridException e) {
        fut.onError(e);

        break;
      }
    }

    markInitialized();
  }
  /** {@inheritDoc} */
  @Override
  public int countDown() throws GridException {
    return CU.outTx(new CountDownCallable(1), ctx);
  }
  /**
   * Starts activity.
   *
   * @throws IgniteInterruptedCheckedException If interrupted.
   */
  public void init() throws IgniteInterruptedCheckedException {
    if (isDone()) return;

    if (init.compareAndSet(false, true)) {
      if (isDone()) return;

      try {
        // Wait for event to occur to make sure that discovery
        // will return corresponding nodes.
        U.await(evtLatch);

        assert discoEvt != null : this;
        assert !dummy && !forcePreload : this;

        ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx, exchId.topologyVersion());

        oldestNode.set(oldest);

        startCaches();

        // True if client node joined or failed.
        boolean clientNodeEvt;

        if (F.isEmpty(reqs)) {
          int type = discoEvt.type();

          assert type == EVT_NODE_JOINED || type == EVT_NODE_LEFT || type == EVT_NODE_FAILED
              : discoEvt;

          clientNodeEvt = CU.clientNode(discoEvt.eventNode());
        } else {
          assert discoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : discoEvt;

          boolean clientOnlyStart = true;

          for (DynamicCacheChangeRequest req : reqs) {
            if (!req.clientStartOnly()) {
              clientOnlyStart = false;

              break;
            }
          }

          clientNodeEvt = clientOnlyStart;
        }

        if (clientNodeEvt) {
          ClusterNode node = discoEvt.eventNode();

          // Client needs to initialize affinity for a local join event or for started client caches.
          if (!node.isLocal()) {
            for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
              if (cacheCtx.isLocal()) continue;

              GridDhtPartitionTopology top = cacheCtx.topology();

              top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));

              if (cacheCtx.affinity().affinityTopologyVersion() == AffinityTopologyVersion.NONE) {
                initTopology(cacheCtx);

                top.beforeExchange(this);
              } else
                cacheCtx.affinity().clientEventTopologyChange(discoEvt, exchId.topologyVersion());
            }

            if (exchId.isLeft())
              cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

            onDone(exchId.topologyVersion());

            skipPreload = cctx.kernalContext().clientNode();

            return;
          }
        }

        if (cctx.kernalContext().clientNode()) {
          skipPreload = true;

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            GridDhtPartitionTopology top = cacheCtx.topology();

            top.updateTopologyVersion(exchId, this, -1, stopping(cacheCtx.cacheId()));
          }

          for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
            if (cacheCtx.isLocal()) continue;

            initTopology(cacheCtx);
          }

          if (oldestNode.get() != null) {
            rmtNodes =
                new ConcurrentLinkedQueue<>(
                    CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

            rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

            ready.set(true);

            initFut.onDone(true);

            if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

            sendPartitions();
          } else onDone(exchId.topologyVersion());

          return;
        }

        assert oldestNode.get() != null;

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (isCacheAdded(cacheCtx.cacheId(), exchId.topologyVersion())) {
            if (cacheCtx
                .discovery()
                .cacheAffinityNodes(cacheCtx.name(), topologyVersion())
                .isEmpty())
              U.quietAndWarn(log, "No server nodes found for cache client: " + cacheCtx.namex());
          }

          cacheCtx.preloader().onExchangeFutureAdded();
        }

        List<String> cachesWithoutNodes = null;

        if (exchId.isLeft()) {
          for (String name : cctx.cache().cacheNames()) {
            if (cctx.discovery().cacheAffinityNodes(name, topologyVersion()).isEmpty()) {
              if (cachesWithoutNodes == null) cachesWithoutNodes = new ArrayList<>();

              cachesWithoutNodes.add(name);

              // Fire event even if there is no client cache started.
              if (cctx.gridEvents().isRecordable(EventType.EVT_CACHE_NODES_LEFT)) {
                Event evt =
                    new CacheEvent(
                        name,
                        cctx.localNode(),
                        cctx.localNode(),
                        "All server nodes have left the cluster.",
                        EventType.EVT_CACHE_NODES_LEFT,
                        0,
                        false,
                        null,
                        null,
                        null,
                        null,
                        false,
                        null,
                        false,
                        null,
                        null,
                        null);

                cctx.gridEvents().record(evt);
              }
            }
          }
        }

        if (cachesWithoutNodes != null) {
          StringBuilder sb =
              new StringBuilder(
                  "All server nodes for the following caches have left the cluster: ");

          for (int i = 0; i < cachesWithoutNodes.size(); i++) {
            String cache = cachesWithoutNodes.get(i);

            sb.append('\'').append(cache).append('\'');

            if (i != cachesWithoutNodes.size() - 1) sb.append(", ");
          }

          U.quietAndWarn(log, sb.toString());

          U.quietAndWarn(log, "Must have server nodes for caches to operate.");
        }

        assert discoEvt != null;

        assert exchId.nodeId().equals(discoEvt.eventNode().id());

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          GridClientPartitionTopology clientTop =
              cctx.exchange().clearClientTopology(cacheCtx.cacheId());

          long updSeq = clientTop == null ? -1 : clientTop.lastUpdateSequence();

          // Update before waiting for locks.
          if (!cacheCtx.isLocal())
            cacheCtx
                .topology()
                .updateTopologyVersion(exchId, this, updSeq, stopping(cacheCtx.cacheId()));
        }

        // Grab all alive remote nodes with order of equal or less than last joined node.
        rmtNodes =
            new ConcurrentLinkedQueue<>(
                CU.aliveRemoteServerNodesWithCaches(cctx, exchId.topologyVersion()));

        rmtIds = Collections.unmodifiableSet(new HashSet<>(F.nodeIds(rmtNodes)));

        for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> m : singleMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        for (Map.Entry<UUID, GridDhtPartitionsFullMessage> m : fullMsgs.entrySet())
          // If received any messages, process them.
          onReceive(m.getKey(), m.getValue());

        AffinityTopologyVersion topVer = exchId.topologyVersion();

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Must initialize topology after we get discovery event.
          initTopology(cacheCtx);

          cacheCtx.preloader().updateLastExchangeFuture(this);
        }

        IgniteInternalFuture<?> partReleaseFut = cctx.partitionReleaseFuture(topVer);

        // Assign to class variable so it will be included into toString() method.
        this.partReleaseFut = partReleaseFut;

        if (log.isDebugEnabled()) log.debug("Before waiting for partition release future: " + this);

        while (true) {
          try {
            partReleaseFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            // Print pending transactions and locks that might have led to hang.
            dumpPendingObjects();
          }
        }

        if (log.isDebugEnabled()) log.debug("After waiting for partition release future: " + this);

        if (!F.isEmpty(reqs)) blockGateways();

        if (exchId.isLeft())
          cctx.mvcc().removeExplicitNodeLocks(exchId.nodeId(), exchId.topologyVersion());

        IgniteInternalFuture<?> locksFut = cctx.mvcc().finishLocks(exchId.topologyVersion());

        while (true) {
          try {
            locksFut.get(2 * cctx.gridConfig().getNetworkTimeout(), TimeUnit.MILLISECONDS);

            break;
          } catch (IgniteFutureTimeoutCheckedException ignored) {
            U.warn(
                log,
                "Failed to wait for locks release future. "
                    + "Dumping pending objects that might be the cause: "
                    + cctx.localNodeId());

            U.warn(log, "Locked entries:");

            Map<IgniteTxKey, Collection<GridCacheMvccCandidate>> locks =
                cctx.mvcc().unfinishedLocks(exchId.topologyVersion());

            for (Map.Entry<IgniteTxKey, Collection<GridCacheMvccCandidate>> e : locks.entrySet())
              U.warn(log, "Locked entry [key=" + e.getKey() + ", mvcc=" + e.getValue() + ']');
          }
        }

        for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
          if (cacheCtx.isLocal()) continue;

          // Notify replication manager.
          GridCacheContext drCacheCtx =
              cacheCtx.isNear() ? cacheCtx.near().dht().context() : cacheCtx;

          if (drCacheCtx.isDrEnabled()) drCacheCtx.dr().beforeExchange(topVer, exchId.isLeft());

          // Partition release future is done so we can flush the write-behind store.
          cacheCtx.store().forceFlush();

          // Process queued undeploys prior to sending/spreading map.
          cacheCtx.preloader().unwindUndeploys();

          GridDhtPartitionTopology top = cacheCtx.topology();

          assert topVer.equals(top.topologyVersion())
              : "Topology version is updated only in this class instances inside single ExchangeWorker thread.";

          top.beforeExchange(this);
        }

        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
          top.updateTopologyVersion(exchId, this, -1, stopping(top.cacheId()));

          top.beforeExchange(this);
        }
      } catch (IgniteInterruptedCheckedException e) {
        onDone(e);

        throw e;
      } catch (Throwable e) {
        U.error(
            log,
            "Failed to reinitialize local partitions (preloading will be stopped): " + exchId,
            e);

        onDone(e);

        if (e instanceof Error) throw (Error) e;

        return;
      }

      if (F.isEmpty(rmtIds)) {
        onDone(exchId.topologyVersion());

        return;
      }

      ready.set(true);

      initFut.onDone(true);

      if (log.isDebugEnabled()) log.debug("Initialized future: " + this);

      // If this node is not oldest.
      if (!oldestNode.get().id().equals(cctx.localNodeId())) sendPartitions();
      else {
        boolean allReceived = allReceived();

        if (allReceived && replied.compareAndSet(false, true)) {
          if (spreadPartitions()) onDone(exchId.topologyVersion());
        }
      }

      scheduleRecheck();
    } else assert false : "Skipped init future: " + this;
  }
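
A recurring pattern in init() is the bounded get() inside an endless loop: the partition release
future and the lock future are awaited indefinitely, but every network-timeout interval the
pending transactions and locks are dumped so a hang remains diagnosable. A generic sketch of that
pattern against a plain java.util.concurrent.Future:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class AwaitWithDiagnosticsSketch {
  /** Waits for the future without giving up, logging diagnostics every timeoutMs. */
  static <T> T awaitDumpingDiagnostics(Future<T> fut, long timeoutMs) throws Exception {
    while (true) {
      try {
        return fut.get(timeoutMs, TimeUnit.MILLISECONDS);
      } catch (TimeoutException ignored) {
        // The periodic timeout only triggers diagnostics, mirroring how the exchange future
        // dumps pending transactions and locks while it keeps waiting.
        System.err.println("Still waiting for " + fut + ", dumping pending objects...");
      }
    }
  }
}
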
  /** {@inheritDoc} */
  @Override
  public int countDown(int val) throws GridException {
    A.ensure(val > 0, "val should be positive");

    return CU.outTx(new CountDownCallable(val), ctx);
  }
  /** {@inheritDoc} */
  @Override
  public void countDownAll() throws GridException {
    CU.outTx(new CountDownCallable(0), ctx);
  }
Example No. 23
    /** @param res Result callback. */
    void onResult(GridNearLockResponse<K, V> res) {
      if (rcvRes.compareAndSet(false, true)) {
        if (res.error() != null) {
          if (log.isDebugEnabled())
            log.debug(
                "Finishing mini future with an error due to error in response [miniFut="
                    + this
                    + ", res="
                    + res
                    + ']');

          // Fail.
          if (res.error() instanceof GridCacheLockTimeoutException) onDone(false);
          else onDone(res.error());

          return;
        }

        int i = 0;

        long topVer = topSnapshot.get().topologyVersion();

        for (K k : keys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = cctx.near().entryExx(k, topVer);

            try {
              if (res.dhtVersion(i) == null) {
                onDone(
                    new GridException(
                        "Failed to receive DHT version from remote node "
                            + "(will fail the lock): "
                            + res));

                return;
              }

              GridTuple3<GridCacheVersion, V, byte[]> oldValTup = valMap.get(entry.key());

              V oldVal = entry.rawGet();
              boolean hasOldVal = false;
              V newVal = res.value(i);
              byte[] newBytes = res.valueBytes(i);

              boolean readRecordable = false;

              if (retval) {
                readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ);

                if (readRecordable) hasOldVal = entry.hasValue();
              }

              GridCacheVersion dhtVer = res.dhtVersion(i);
              GridCacheVersion mappedVer = res.mappedVersion(i);

              if (newVal == null) {
                if (oldValTup != null) {
                  if (oldValTup.get1().equals(dhtVer)) {
                    newVal = oldValTup.get2();

                    newBytes = oldValTup.get3();
                  }

                  oldVal = oldValTup.get2();
                }
              }

              // Lock is held at this point, so we can set the
              // returned value if any.
              entry.resetFromPrimary(newVal, newBytes, lockVer, dhtVer, node.id());

              if (inTx() && implicitTx() && tx.onePhaseCommit()) {
                boolean pass = res.filterResult(i);

                tx.entry(k).filters(pass ? CU.<K, V>empty() : CU.<K, V>alwaysFalse());
              }

              entry.readyNearLock(
                  lockVer,
                  mappedVer,
                  res.committedVersions(),
                  res.rolledbackVersions(),
                  res.pending());

              if (retval) {
                if (readRecordable)
                  cctx.events()
                      .addEvent(
                          entry.partition(),
                          entry.key(),
                          tx,
                          null,
                          EVT_CACHE_OBJECT_READ,
                          newVal,
                          newVal != null || newBytes != null,
                          oldVal,
                          hasOldVal,
                          CU.subjectId(tx, cctx));

                cctx.cache().metrics0().onRead(false);
              }

              if (log.isDebugEnabled())
                log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']');

              break; // Inner while loop.
            } catch (GridCacheEntryRemovedException ignored) {
              if (log.isDebugEnabled())
                log.debug("Failed to add candidates because entry was removed (will renew).");

              // Replace old entry with new one.
              entries.set(i, (GridDistributedCacheEntry<K, V>) cctx.cache().entryEx(entry.key()));
            } catch (GridException e) {
              onDone(e);

              return;
            }
          }

          i++;
        }

        try {
          proceedMapping(mappings);
        } catch (GridException e) {
          onDone(e);
        }

        onDone(true);
      }
    }
Example No. 24
  /**
   * Maps keys to nodes. Note that we cannot simply group keys by node and send one lock request
   * per node, as that approach would not preserve the order of lock acquisition. Instead, keys
   * are split into contiguous groups belonging to one primary node, and locks for these groups
   * are acquired sequentially.
   *
   * @param keys Keys.
   */
  private void map(Iterable<? extends K> keys) {
    try {
      GridDiscoveryTopologySnapshot snapshot = topSnapshot.get();

      assert snapshot != null;

      long topVer = snapshot.topologyVersion();

      assert topVer > 0;

      if (CU.affinityNodes(cctx, topVer).isEmpty()) {
        onDone(
            new GridTopologyException(
                "Failed to map keys for near-only cache (all "
                    + "partition nodes left the grid)."));

        return;
      }

      ConcurrentLinkedDeque8<GridNearLockMapping<K, V>> mappings = new ConcurrentLinkedDeque8<>();

      // Assign keys to primary nodes.
      GridNearLockMapping<K, V> map = null;

      for (K key : keys) {
        GridNearLockMapping<K, V> updated = map(key, map, topVer);

        // If new mapping was created, add to collection.
        if (updated != map) mappings.add(updated);

        map = updated;
      }

      if (isDone()) {
        if (log.isDebugEnabled()) log.debug("Abandoning (re)map because future is done: " + this);

        return;
      }

      if (log.isDebugEnabled())
        log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');

      // Create mini futures.
      for (Iterator<GridNearLockMapping<K, V>> iter = mappings.iterator(); iter.hasNext(); ) {
        GridNearLockMapping<K, V> mapping = iter.next();

        GridNode node = mapping.node();
        Collection<K> mappedKeys = mapping.mappedKeys();

        assert !mappedKeys.isEmpty();

        GridNearLockRequest<K, V> req = null;

        Collection<K> distributedKeys = new ArrayList<>(mappedKeys.size());

        boolean explicit = false;

        for (K key : mappedKeys) {
          while (true) {
            GridNearCacheEntry<K, V> entry = null;

            try {
              entry = cctx.near().entryExx(key, topVer);

              if (!cctx.isAll(entry.wrap(false), filter)) {
                if (log.isDebugEnabled())
                  log.debug("Entry being locked did not pass filter (will not lock): " + entry);

                onComplete(false, false);

                return;
              }

              // Removed exception may be thrown here.
              GridCacheMvccCandidate<K> cand = addEntry(topVer, entry, node.id());

              if (isDone()) {
                if (log.isDebugEnabled())
                  log.debug(
                      "Abandoning (re)map because future is done after addEntry attempt "
                          + "[fut="
                          + this
                          + ", entry="
                          + entry
                          + ']');

                return;
              }

              if (cand != null) {
                if (tx == null && !cand.reentry())
                  cctx.mvcc().addExplicitLock(threadId, cand, snapshot);

                GridTuple3<GridCacheVersion, V, byte[]> val = entry.versionedValue();

                if (val == null) {
                  GridDhtCacheEntry<K, V> dhtEntry = dht().peekExx(key);

                  try {
                    if (dhtEntry != null) val = dhtEntry.versionedValue(topVer);
                  } catch (GridCacheEntryRemovedException ignored) {
                    assert dhtEntry.obsolete()
                        : " Got removed exception for non-obsolete entry: " + dhtEntry;

                    if (log.isDebugEnabled())
                      log.debug(
                          "Got removed exception for DHT entry in map (will ignore): " + dhtEntry);
                  }
                }

                GridCacheVersion dhtVer = null;

                if (val != null) {
                  dhtVer = val.get1();

                  valMap.put(key, val);
                }

                if (!cand.reentry()) {
                  if (req == null) {
                    req =
                        new GridNearLockRequest<>(
                            topVer,
                            cctx.nodeId(),
                            threadId,
                            futId,
                            lockVer,
                            inTx(),
                            implicitTx(),
                            implicitSingleTx(),
                            read,
                            isolation(),
                            isInvalidate(),
                            timeout,
                            syncCommit(),
                            syncRollback(),
                            mappedKeys.size(),
                            inTx() ? tx.size() : mappedKeys.size(),
                            inTx() ? tx.groupLockKey() : null,
                            inTx() && tx.partitionLock(),
                            inTx() ? tx.subjectId() : null);

                    mapping.request(req);
                  }

                  distributedKeys.add(key);

                  GridCacheTxEntry<K, V> writeEntry = tx != null ? tx.writeMap().get(key) : null;

                  if (tx != null) tx.addKeyMapping(key, mapping.node());

                  req.addKeyBytes(
                      key,
                      node.isLocal() ? null : entry.getOrMarshalKeyBytes(),
                      retval && dhtVer == null,
                      dhtVer, // Include DHT version to match remote DHT entry.
                      writeEntry,
                      inTx() ? tx.entry(key).drVersion() : null,
                      cctx);

                  // Clear transfer required flag since we are sending message.
                  if (writeEntry != null) writeEntry.transferRequired(false);
                }

                if (cand.reentry())
                  explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());
              } else
                // Ignore reentries within transactions.
                explicit = tx != null && !entry.hasLockCandidate(tx.xidVersion());

              if (explicit) tx.addKeyMapping(key, mapping.node());

              break;
            } catch (GridCacheEntryRemovedException ignored) {
              assert entry.obsolete() : "Got removed exception on non-obsolete entry: " + entry;

              if (log.isDebugEnabled())
                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
            }
          }

          // Mark mapping explicit lock flag.
          if (explicit) {
            boolean marked = tx != null && tx.markExplicit(node.id());

            assert tx == null || marked;
          }
        }

        if (!distributedKeys.isEmpty()) mapping.distributedKeys(distributedKeys);
        else {
          assert mapping.request() == null;

          iter.remove();
        }
      }

      cctx.mvcc().recheckPendingLocks();

      proceedMapping(mappings);
    } catch (GridException ex) {
      onError(ex);
    }
  }
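
The javadoc above captures the interesting constraint: grouping all keys per node would reorder
lock acquisition, so instead the keys are split into contiguous runs that share a primary node
and the runs are processed in their original order. A self-contained sketch of that splitting
(hypothetical types, not the GridNearLockMapping API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

public class ContiguousGroupingSketch {
  /** One run of consecutive keys that map to the same primary node. */
  static class Group<K, N> {
    final N node;
    final List<K> keys = new ArrayList<>();

    Group(N node) {
      this.node = node;
    }

    @Override
    public String toString() {
      return node + ":" + keys;
    }
  }

  /**
   * Splits keys into contiguous runs per primary node, preserving the original key order, so
   * locks can be acquired run by run in the same order the caller supplied the keys.
   */
  static <K, N> List<Group<K, N>> split(Iterable<K> keys, Function<K, N> primary) {
    List<Group<K, N>> groups = new ArrayList<>();

    Group<K, N> cur = null;

    for (K key : keys) {
      N node = primary.apply(key);

      if (cur == null || !cur.node.equals(node))
        groups.add(cur = new Group<>(node)); // A new run starts whenever the primary changes.

      cur.keys.add(key);
    }

    return groups;
  }

  public static void main(String[] args) {
    // Keys 1, 2 -> node A, key 3 -> node B, key 4 -> node A again: three runs, order preserved.
    System.out.println(split(Arrays.asList(1, 2, 3, 4), k -> k == 3 ? "B" : "A")); // [A:[1, 2], B:[3], A:[4]]
  }
}
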
  /**
   * Synchronous sequence update operation. Will add given amount to the sequence value.
   *
   * @param l Increment amount.
   * @param updateCall Cache call that will update the sequence reservation count in accordance
   *     with {@code l}.
   * @param updated If {@code true}, will return sequence value after update, otherwise will return
   *     sequence value prior to update.
   * @return Sequence value.
   * @throws GridException If update failed.
   */
  private long internalUpdate(long l, @Nullable Callable<Long> updateCall, boolean updated)
      throws GridException {
    checkRemoved();

    assert l > 0;

    lock.lock();

    try {
      // If reserved range isn't exhausted.
      if (locVal + l <= upBound) {
        long curVal = locVal;

        locVal += l;

        return updated ? locVal : curVal;
      }
    } finally {
      lock.unlock();
    }

    if (updateCall == null) updateCall = internalUpdate(l, updated);

    while (true) {
      if (updateGuard.compareAndSet(false, true)) {
        try {
          // This call must be outside lock.
          return CU.outTx(updateCall, ctx);
        } finally {
          lock.lock();

          try {
            updateGuard.set(false);

            cond.signalAll();
          } finally {
            lock.unlock();
          }
        }
      } else {
        lock.lock();

        try {
          while (locVal >= upBound && updateGuard.get()) {
            try {
              cond.await(500, MILLISECONDS);
            } catch (InterruptedException e) {
              throw new GridInterruptedException(e);
            }
          }

          checkRemoved();

          // If reserved range isn't exhausted.
          if (locVal + l <= upBound) {
            long curVal = locVal;

            locVal += l;

            return updated ? locVal : curVal;
          }
        } finally {
          lock.unlock();
        }
      }
    }
  }
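
internalUpdate() implements a range-reserving counter: increments are served from a locally
reserved range under a lock, and only when that range is exhausted does a single thread (behind
updateGuard) go to the cache for a new reservation while the other threads wait on the condition.
A much-simplified, self-contained sketch of the idea, where an in-memory counter stands in for
the cache transaction and l is assumed never to exceed the reserved range:

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;

public class RangeSequenceSketch {
  private final ReentrantLock lock = new ReentrantLock();
  private final AtomicLong globalCounter = new AtomicLong(); // Stand-in for the cache-backed value.
  private final long reserveSize;

  private long locVal; // Next value to hand out locally.
  private long upBound; // Highest value that may be handed out from the current range.

  RangeSequenceSketch(long reserveSize) {
    this.reserveSize = reserveSize;

    reserve();
  }

  /** Reserves a fresh range from the shared counter; in the original this is a cache transaction. */
  private void reserve() {
    upBound = globalCounter.addAndGet(reserveSize);
    locVal = upBound - reserveSize;
  }

  /** Adds l to the sequence, reserving a new range once the local one is exhausted. */
  long addAndGet(long l) {
    lock.lock();

    try {
      if (locVal + l > upBound)
        reserve(); // Simplified: the original lets one thread update while the rest wait.

      locVal += l;

      return locVal;
    } finally {
      lock.unlock();
    }
  }
}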