Code Example #1
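Building an ordered fallback queue: the local node, if present, is placed first, and F.view is used both to locate it and to append the remaining non-local nodes.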
    /**
     * @param nodes Nodes to build the fallback queue from.
     * @return Queue of fallback nodes with the local node, if present, first.
     */
    private Queue<ClusterNode> fallbacks(Collection<ClusterNode> nodes) {
      Queue<ClusterNode> fallbacks = new LinkedList<>();

      ClusterNode node = F.first(F.view(nodes, IS_LOC_NODE));

      if (node != null) fallbacks.add(node);

      fallbacks.addAll(node != null ? F.view(nodes, F.not(IS_LOC_NODE)) : nodes);

      return fallbacks;
    }
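All of these examples center on F.view from Ignite's internal GridFunc utilities (exposed through the org.apache.ignite.internal.util.typedef.F typedef), which wraps a collection in a read-only view that evaluates a predicate lazily on iteration instead of copying elements. Below is a minimal, self-contained sketch of that behavior; it assumes ignite-core is on the classpath, and the class name ViewSketch is purely illustrative.

  import java.util.Arrays;
  import java.util.Collection;

  import org.apache.ignite.internal.util.typedef.F;
  import org.apache.ignite.lang.IgnitePredicate;

  public class ViewSketch {
    public static void main(String[] args) {
      Collection<Integer> nums = Arrays.asList(1, 2, 3, 4, 5);

      // F.view does not copy: it wraps the source collection and applies
      // the predicate on each access, so the view reflects later changes
      // to the underlying collection.
      Collection<Integer> evens = F.view(nums, new IgnitePredicate<Integer>() {
        @Override public boolean apply(Integer i) {
          return i % 2 == 0;
        }
      });

      System.out.println(evens); // Prints: [2, 4]
    }
  }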
Code Example #2
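A FinishLockFuture constructor that records, per cache entry, the local MVCC lock candidates that match versionFilter(); F.view does the filtering before the candidates are copied into a concurrent queue.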
    /**
     * @param entries Entries to scan for pending local lock candidates.
     * @param topVer Topology version.
     */
    FinishLockFuture(Iterable<GridDistributedCacheEntry> entries, AffinityTopologyVersion topVer) {
      assert topVer.compareTo(AffinityTopologyVersion.ZERO) > 0;

      this.topVer = topVer;

      for (GridCacheEntryEx entry : entries) {
        // Either local or near local candidates.
        try {
          Collection<GridCacheMvccCandidate> locs = entry.localCandidates();

          if (!F.isEmpty(locs)) {
            Collection<GridCacheMvccCandidate> cands = new ConcurrentLinkedQueue<>();

            cands.addAll(F.view(locs, versionFilter()));

            if (!F.isEmpty(cands)) pendingLocks.put(entry.txKey(), cands);
          }
        } catch (GridCacheEntryRemovedException ignored) {
          if (exchLog.isDebugEnabled())
            exchLog.debug(
                "Got removed entry when adding it to finish lock future (will ignore): " + entry);
        }
      }

      if (exchLog.isDebugEnabled())
        exchLog.debug("Pending lock set [topVer=" + topVer + ", locks=" + pendingLocks + ']');
    }
Code Example #3
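Resolving the data nodes for a cache, optionally restricted to a cluster group projection and to the owners of one partition; F.view applies all three checks lazily over the affinity nodes.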
  /**
   * @param cctx Cache context.
   * @param prj Projection (optional).
   * @param part Partition (optional).
   * @return Collection of data nodes in provided projection (if any).
   */
  private static Collection<ClusterNode> nodes(
      final GridCacheContext<?, ?> cctx,
      @Nullable final ClusterGroup prj,
      @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null) return affNodes;

    final Set<ClusterNode> owners =
        part == null
            ? Collections.<ClusterNode>emptySet()
            : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(
        affNodes,
        new P1<ClusterNode>() {
          @Override
          public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name())
                && (prj == null || prj.node(n.id()) != null)
                && (part == null || owners.contains(n));
          }
        });
  }
Code Example #4
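Starting dynamic caches during an exchange: F.view narrows the pending DynamicCacheChangeRequest collection to start requests before handing them to prepareCachesStart.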
 /**
  * Starts dynamic caches.
  *
  * @throws IgniteCheckedException If failed.
  */
 private void startCaches() throws IgniteCheckedException {
   cctx.cache()
       .prepareCachesStart(
           F.view(
               reqs,
               new IgnitePredicate<DynamicCacheChangeRequest>() {
                 @Override
                 public boolean apply(DynamicCacheChangeRequest req) {
                   return req.start();
                 }
               }),
           exchId.topologyVersion());
 }
Code Example #5
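Creating a future that completes once all matching locks are released; when a key filter is supplied, F.view restricts the locked entries to those whose keys satisfy it.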
  /**
   * @param keyFilter Key filter.
   * @param topVer Topology version.
   * @return Future that completes when all locks matching the filter are released.
   */
  private IgniteInternalFuture<?> finishLocks(
      @Nullable final IgnitePredicate<KeyCacheObject> keyFilter, AffinityTopologyVersion topVer) {
    assert topVer.topologyVersion() != 0;

    if (topVer.equals(AffinityTopologyVersion.NONE)) return new GridFinishedFuture<>();

    final FinishLockFuture finishFut =
        new FinishLockFuture(
            keyFilter == null
                ? locked()
                : F.view(
                    locked(),
                    new P1<GridDistributedCacheEntry>() {
                      @Override
                      public boolean apply(GridDistributedCacheEntry e) {
                        return F.isAll(e.key(), keyFilter);
                      }
                    }),
            topVer);

    finishFuts.add(finishFut);

    finishFut.listen(
        new CI1<IgniteInternalFuture<?>>() {
          @Override
          public void apply(IgniteInternalFuture<?> e) {
            finishFuts.remove(finishFut);

            // This call is required to make sure that the concurrent queue
            // clears memory occupied by internal nodes.
            finishFuts.peek();
          }
        });

    finishFut.recheck();

    return finishFut;
  }
Code Example #6
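Handling a near-get response: if the remote node reported invalid partitions, F.view selects the keys that hash to those partitions so they can be remapped, either immediately on the current topology version or once the newer remote version is ready.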
    /** @param res Get response from the remote node. */
    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
    void onResult(final GridNearGetResponse res) {
      final Collection<Integer> invalidParts = res.invalidPartitions();

      // If an error happened on the remote node, fail the whole future.
      if (res.error() != null) {
        onDone(res.error());

        return;
      }

      // Remap invalid partitions.
      if (!F.isEmpty(invalidParts)) {
        AffinityTopologyVersion rmtTopVer = res.topologyVersion();

        assert !rmtTopVer.equals(AffinityTopologyVersion.ZERO);

        if (rmtTopVer.compareTo(topVer) <= 0) {
          // Fail the whole get future.
          onDone(
              new IgniteCheckedException(
                  "Failed to process invalid partitions response (remote node reported "
                      + "invalid partitions but remote topology version does not differ from local) "
                      + "[topVer="
                      + topVer
                      + ", rmtTopVer="
                      + rmtTopVer
                      + ", invalidParts="
                      + invalidParts
                      + ", nodeId="
                      + node.id()
                      + ']'));

          return;
        }

        if (log.isDebugEnabled())
          log.debug(
              "Remapping mini get future [invalidParts=" + invalidParts + ", fut=" + this + ']');

        if (!canRemap) {
          map(
              F.view(
                  keys.keySet(),
                  new P1<KeyCacheObject>() {
                    @Override
                    public boolean apply(KeyCacheObject key) {
                      return invalidParts.contains(cctx.affinity().partition(key));
                    }
                  }),
              F.t(node, keys),
              topVer);

          onDone(createResultMap(res.entries()));

          return;
        }

        // Need to wait for next topology version to remap.
        IgniteInternalFuture<AffinityTopologyVersion> topFut =
            cctx.affinity().affinityReadyFuture(rmtTopVer);

        topFut.listen(
            new CIX1<IgniteInternalFuture<AffinityTopologyVersion>>() {
              @SuppressWarnings("unchecked")
              @Override
              public void applyx(IgniteInternalFuture<AffinityTopologyVersion> fut)
                  throws IgniteCheckedException {
                AffinityTopologyVersion topVer = fut.get();

                // This will append new futures to compound list.
                map(
                    F.view(
                        keys.keySet(),
                        new P1<KeyCacheObject>() {
                          @Override
                          public boolean apply(KeyCacheObject key) {
                            return invalidParts.contains(cctx.affinity().partition(key));
                          }
                        }),
                    F.t(node, keys),
                    topVer);

                onDone(createResultMap(res.entries()));
              }
            });
      } else {
        try {
          onDone(createResultMap(res.entries()));
        } catch (Exception e) {
          onDone(e);
        }
      }
    }
Code Example #7
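Merging a single node's partition map into the full map under a write lock; F.view with F0.notIn walks the partitions missing from the new map so their stale node mappings can be dropped.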
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating single partition map [exchId=" + exchId + ", parts=" + mapString(parts) + ']');

    if (!cctx.discovery().alive(parts.nodeId())) {
      if (log.isDebugEnabled())
        log.debug(
            "Received partition update for non-existing node (will ignore) [exchId="
                + exchId
                + ", parts="
                + parts
                + ']');

      return null;
    }

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (lastExchangeId != null && exchId != null && lastExchangeId.compareTo(exchId) > 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for single partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (exchId != null) lastExchangeId = exchId;

      if (node2part == null)
        // Create invalid partition map.
        node2part = new GridDhtPartitionFullMap();

      GridDhtPartitionMap cur = node2part.get(parts.nodeId());

      if (cur != null && cur.updateSequence() >= parts.updateSequence()) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale update sequence for single partition map update (will ignore) [exchId="
                  + exchId
                  + ", curSeq="
                  + cur.updateSequence()
                  + ", newSeq="
                  + parts.updateSequence()
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      node2part = new GridDhtPartitionFullMap(node2part, updateSeq);

      boolean changed = cur == null || !cur.equals(parts);

      node2part.put(parts.nodeId(), parts);

      part2node = new HashMap<>(part2node);

      // Add new mappings.
      for (Integer p : parts.keySet()) {
        Set<UUID> ids = part2node.get(p);

        if (ids == null)
          // Initialize HashSet to size 3 in anticipation that there won't be
          // more than 3 nodes per partition.
          part2node.put(p, ids = U.newHashSet(3));

        changed |= ids.add(parts.nodeId());
      }

      // Remove obsolete mappings.
      if (cur != null) {
        for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
          Set<UUID> ids = part2node.get(p);

          if (ids != null) changed |= ids.remove(parts.nodeId());
        }
      }

      changed |= checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after single update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }