/** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    // Some keys were neither resolved locally nor skipped - peek them in the DHT cache.
    if (map.size() + skipped.size() != keys.size()) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              filter));
    }

    return map;
  }
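The method above resolves what it can from the near cache and forwards only the still-unresolved keys (a filtered view) to the DHT cache. Below is a minimal, JDK-only sketch of that two-level lookup pattern; the near and dht lookup functions are hypothetical stand-ins, not GridGain APIs, and the skipped-key bookkeeping is omitted for brevity.

import java.util.*;
import java.util.function.Function;

final class TwoLevelPeek<K, V> {
  private final Function<Collection<K>, Map<K, V>> near; // First-level (near) lookup.
  private final Function<Collection<K>, Map<K, V>> dht; // Fallback (DHT) lookup.

  TwoLevelPeek(Function<Collection<K>, Map<K, V>> near, Function<Collection<K>, Map<K, V>> dht) {
    this.near = near;
    this.dht = dht;
  }

  Map<K, V> peekAll(Collection<? extends K> keys) {
    if (keys == null || keys.isEmpty()) return Collections.emptyMap();

    Map<K, V> map = new HashMap<>(near.apply(new ArrayList<K>(keys)));

    if (map.size() != keys.size()) {
      // Forward only the keys the first-level lookup did not resolve.
      Collection<K> rest = new ArrayList<>();

      for (K k : keys) if (!map.containsKey(k)) rest.add(k);

      map.putAll(dht.apply(rest));
    }

    return map;
  }
}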
  /** @param node Node to remove. */
  public void removeMappedNode(GridNode node) {
    if (mappedDhtNodes.contains(node))
      mappedDhtNodes = new ArrayList<>(F.view(mappedDhtNodes, F.notEqualTo(node)));

    if (mappedNearNodes != null && mappedNearNodes.contains(node))
      mappedNearNodes = new ArrayList<>(F.view(mappedNearNodes, F.notEqualTo(node)));
  }
    /**
     * @param nodes Nodes.
     * @return Queue of fallback nodes, with the local node (if any) first.
     */
    private Queue<ClusterNode> fallbacks(Collection<ClusterNode> nodes) {
      Queue<ClusterNode> fallbacks = new LinkedList<>();

      // The local node, if present, goes first in the fallback queue.
      ClusterNode node = F.first(F.view(nodes, IS_LOC_NODE));

      if (node != null) fallbacks.add(node);

      fallbacks.addAll(node != null ? F.view(nodes, F.not(IS_LOC_NODE)) : nodes);

      return fallbacks;
    }
  /** {@inheritDoc} */
  @Override
  public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds)
      throws GridSpiException {
    assert !F.isEmpty(nodeIds);

    long now = U.currentTimeMillis();

    Collection<UUID> expired = new LinkedList<>();

    for (UUID id : nodeIds) {
      GridNodeMetrics nodeMetrics = metricsMap.get(id);

      Long ts = tsMap.get(id);

      if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime) expired.add(id);
    }

    if (!expired.isEmpty()) {
      Map<UUID, GridNodeMetrics> refreshed = metrics0(expired);

      for (UUID id : refreshed.keySet()) tsMap.put(id, now);

      metricsMap.putAll(refreshed);
    }

    return F.view(metricsMap, F.contains(nodeIds));
  }
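The metrics lookup above is a refresh-on-expiry cache: entries that are missing or older than metricsExpireTime are re-fetched in a single batch and their timestamps reset. A compact, JDK-only sketch of the same pattern, assuming a hypothetical batch refresh function in place of metrics0():

import java.util.*;
import java.util.function.Function;

final class ExpiringMetricsCache<K, V> {
  private final Map<K, V> vals = new HashMap<>();
  private final Map<K, Long> tsMap = new HashMap<>();
  private final long expireMs;
  private final Function<Collection<K>, Map<K, V>> refresh; // Hypothetical batch loader.

  ExpiringMetricsCache(long expireMs, Function<Collection<K>, Map<K, V>> refresh) {
    this.expireMs = expireMs;
    this.refresh = refresh;
  }

  Map<K, V> get(Collection<K> keys) {
    long now = System.currentTimeMillis();

    Collection<K> expired = new ArrayList<>();

    for (K k : keys) {
      Long ts = tsMap.get(k);

      if (!vals.containsKey(k) || ts == null || ts < now - expireMs) expired.add(k);
    }

    if (!expired.isEmpty()) {
      Map<K, V> refreshed = refresh.apply(expired);

      for (K k : refreshed.keySet()) tsMap.put(k, now);

      vals.putAll(refreshed);
    }

    // Return only the requested keys, mirroring F.view(metricsMap, F.contains(nodeIds)).
    Map<K, V> res = new HashMap<>();

    for (K k : keys) if (vals.containsKey(k)) res.put(k, vals.get(k));

    return res;
  }
}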
  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys, @Nullable Collection<GridCachePeekMode> modes)
      throws GridException {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map =
        !modes.contains(PARTITIONED_ONLY)
            ? peekAll0(keys, modes, ctx.tm().localTxx(), skipped)
            : new GridLeanMap<K, V>(0);

    // Unless a near-only peek was requested, peek the unresolved keys in the DHT cache.
    if (map.size() != keys.size() && !modes.contains(NEAR_ONLY)) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              modes));
    }

    return map;
  }
  /**
   * @param keys Keys.
   * @param topVer Topology version.
   * @return Nodes for the keys.
   */
  public Collection<GridNode> remoteNodes(Iterable<? extends K> keys, long topVer) {
    Collection<Collection<GridNode>> colcol = new GridLeanSet<>();

    for (K key : keys) colcol.add(nodes(key, topVer));

    return F.view(F.flatCollections(colcol), F.remoteNodes(cctx.localNodeId()));
  }
  /**
   * @param cctx Cache context.
   * @param prj Projection (optional).
   * @param part Partition (optional).
   * @return Collection of data nodes in provided projection (if any).
   */
  private static Collection<ClusterNode> nodes(
      final GridCacheContext<?, ?> cctx,
      @Nullable final ClusterGroup prj,
      @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null) return affNodes;

    final Set<ClusterNode> owners =
        part == null
            ? Collections.<ClusterNode>emptySet()
            : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(
        affNodes,
        new P1<ClusterNode>() {
          @Override
          public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name())
                && (prj == null || prj.node(n.id()) != null)
                && (part == null || owners.contains(n));
          }
        });
  }
  /**
   * @param part Partition.
   * @param topVer Topology version.
   * @return Backup nodes.
   */
  public Collection<GridNode> backups(int part, long topVer) {
    Collection<GridNode> nodes = nodes(part, topVer);

    assert !F.isEmpty(nodes);

    if (nodes.size() <= 1) return Collections.emptyList();

    return F.view(nodes, F.notEqualTo(nodes.iterator().next()));
  }
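backups() treats the first affinity node as the primary and returns a view of the remaining nodes. A tiny, self-contained illustration of the same "everything but the first element" filtering, with plain strings standing in for GridNode instances:

import java.util.*;

final class BackupsDemo {
  /** Everything but the first (primary) node, mirroring F.view(nodes, F.notEqualTo(first)). */
  static Collection<String> backups(Collection<String> nodes) {
    if (nodes.size() <= 1) return Collections.emptyList();

    String primary = nodes.iterator().next();

    Collection<String> backups = new ArrayList<>();

    for (String n : nodes) if (!n.equals(primary)) backups.add(n);

    return backups;
  }

  public static void main(String[] args) {
    // Prints [nodeB, nodeC]: the first (primary) node is excluded.
    System.out.println(backups(Arrays.asList("nodeA", "nodeB", "nodeC")));
  }
}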
 /**
  * @param g Grid.
  * @return Non-system caches.
  */
 private Collection<GridCacheConfiguration> caches(Grid g) {
   return F.view(
       Arrays.asList(g.configuration().getCacheConfiguration()),
       new GridPredicate<GridCacheConfiguration>() {
         @Override
         public boolean apply(GridCacheConfiguration c) {
           return c.getName() == null || !c.getName().equals(CU.UTILITY_CACHE_NAME);
         }
       });
 }
  /** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to the primary node only. Since the backup count is 0, the value must be
      // present on the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that we will see the value in the near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }
 /**
  * Starts dynamic caches.
  *
  * @throws IgniteCheckedException If failed.
  */
 private void startCaches() throws IgniteCheckedException {
   cctx.cache()
       .prepareCachesStart(
           F.view(
               reqs,
               new IgnitePredicate<DynamicCacheChangeRequest>() {
                 @Override
                 public boolean apply(DynamicCacheChangeRequest req) {
                   return req.start();
                 }
               }),
           exchId.topologyVersion());
 }
  /**
   * Executes example.
   *
   * @param args Command line arguments, none required.
   */
  public static void main(String[] args) {
    // Typedefs:
    // ---------
    // G -> GridFactory
    // CI1 -> GridInClosure
    // CO -> GridOutClosure
    // CA -> GridAbsClosure
    // F -> GridFunc

    // Data initialisation.
    Random rand = new Random();

    final int size = 20;

    Collection<Integer> nums = new ArrayList<Integer>(size);

    // Generate list of random integers.
    for (int i = 0; i < size; i++) {
      nums.add(rand.nextInt(size));
    }

    // Print generated list.
    X.println("Generated list:");

    F.forEach(nums, F.<Integer>print("", " "));

    // Get a new unmodifiable collection with the elements whose values are less than half the
    // generated list size.
    Collection<Integer> view =
        F.view(
            nums,
            new P1<Integer>() {
              @Override
              public boolean apply(Integer i) {
                return i < size / 2;
              }
            });

    // Print result.
    X.println("\nResult list:");

    F.forEach(view, F.<Integer>print("", " "));

    // Check that the view is read-only.
    try {
      view.add(12);
    } catch (Exception ignored) {
      X.println("\nView is read only.");
    }
  }
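For comparison, an equivalent read-only filtered collection can be built with only the JDK; unlike F.view, which wraps the original collection lazily, this sketch copies the matching elements:

import java.util.*;
import java.util.stream.Collectors;

final class ReadOnlyFilterDemo {
  public static void main(String[] args) {
    final int size = 20;

    List<Integer> nums = new ArrayList<>(size);

    Random rand = new Random();

    for (int i = 0; i < size; i++) nums.add(rand.nextInt(size));

    // Read-only copy of the elements smaller than half the list size.
    List<Integer> view =
        Collections.unmodifiableList(
            nums.stream().filter(i -> i < size / 2).collect(Collectors.toList()));

    System.out.println("Result list: " + view);

    try {
      view.add(12);
    } catch (UnsupportedOperationException ignored) {
      System.out.println("View is read only.");
    }
  }
}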
  /**
   * @param cctx Cache context.
   * @param prj Projection (optional).
   * @return Collection of data nodes in provided projection (if any).
   */
  private static Collection<GridNode> nodes(
      final GridCacheContext<?, ?> cctx, @Nullable final GridProjection prj) {
    assert cctx != null;

    return F.view(
        CU.allNodes(cctx),
        new P1<GridNode>() {
          @Override
          public boolean apply(GridNode n) {
            GridCacheDistributionMode mode = U.distributionMode(n, cctx.name());

            return (mode == PARTITIONED_ONLY || mode == NEAR_PARTITIONED)
                && (prj == null || prj.node(n.id()) != null);
          }
        });
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  @Nullable
  @Override
  public GridDhtPartitionMap update(
      @Nullable GridDhtPartitionExchangeId exchId, GridDhtPartitionMap parts) {
    if (log.isDebugEnabled())
      log.debug(
          "Updating single partition map [exchId=" + exchId + ", parts=" + mapString(parts) + ']');

    if (!cctx.discovery().alive(parts.nodeId())) {
      if (log.isDebugEnabled())
        log.debug(
            "Received partition update for non-existing node (will ignore) [exchId="
                + exchId
                + ", parts="
                + parts
                + ']');

      return null;
    }

    lock.writeLock().lock();

    try {
      if (stopping) return null;

      if (lastExchangeId != null && exchId != null && lastExchangeId.compareTo(exchId) > 0) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale exchange id for single partition map update (will ignore) [lastExchId="
                  + lastExchangeId
                  + ", exchId="
                  + exchId
                  + ']');

        return null;
      }

      if (exchId != null) lastExchangeId = exchId;

      if (node2part == null)
        // Create invalid partition map.
        node2part = new GridDhtPartitionFullMap();

      GridDhtPartitionMap cur = node2part.get(parts.nodeId());

      if (cur != null && cur.updateSequence() >= parts.updateSequence()) {
        if (log.isDebugEnabled())
          log.debug(
              "Stale update sequence for single partition map update (will ignore) [exchId="
                  + exchId
                  + ", curSeq="
                  + cur.updateSequence()
                  + ", newSeq="
                  + parts.updateSequence()
                  + ']');

        return null;
      }

      long updateSeq = this.updateSeq.incrementAndGet();

      node2part = new GridDhtPartitionFullMap(node2part, updateSeq);

      boolean changed = false;

      if (cur == null || !cur.equals(parts)) changed = true;

      node2part.put(parts.nodeId(), parts);

      part2node = new HashMap<>(part2node);

      // Add new mappings.
      for (Integer p : parts.keySet()) {
        Set<UUID> ids = part2node.get(p);

        if (ids == null)
          // Initialize HashSet to size 3 in anticipation that there won't be
          // more than 3 nodes per partition.
          part2node.put(p, ids = U.newHashSet(3));

        changed |= ids.add(parts.nodeId());
      }

      // Remove obsolete mappings.
      if (cur != null) {
        for (Integer p : F.view(cur.keySet(), F0.notIn(parts.keySet()))) {
          Set<UUID> ids = part2node.get(p);

          if (ids != null) changed |= ids.remove(parts.nodeId());
        }
      }

      changed |= checkEvictions(updateSeq);

      consistencyCheck();

      if (log.isDebugEnabled()) log.debug("Partition map after single update: " + fullMapString());

      return changed ? localPartitionMap() : null;
    } finally {
      lock.writeLock().unlock();
    }
  }
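The heart of the method above is a pair of staleness guards: an update is ignored if it carries an older exchange id or an update sequence that does not advance the one already recorded for the sending node. A minimal, single-threaded sketch of the update-sequence guard, using plain values instead of the GridGain exchange and partition-map types:

import java.util.*;

final class UpdateSequenceGuard {
  private final Map<UUID, Long> lastSeq = new HashMap<>();

  /** @return {@code true} if the update was applied, {@code false} if it was stale. */
  boolean apply(UUID nodeId, long updateSeq) {
    Long cur = lastSeq.get(nodeId);

    // Stale update sequence for this node - ignore, like the cur.updateSequence() check above.
    if (cur != null && cur >= updateSeq) return false;

    lastSeq.put(nodeId, updateSeq);

    return true;
  }
}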