/** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys,
      @Nullable GridPredicate<? super GridCacheEntry<K, V>>[] filter) {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map = peekAll0(keys, filter, skipped);

    if (map.size() + skipped.size() != keys.size()) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              filter));
    }

    return map;
  }
  /**
   * @param nodes Nodes.
   * @param id ID.
   * @throws IgniteCheckedException If failed.
   */
  private void sendAllPartitions(
      Collection<? extends ClusterNode> nodes, GridDhtPartitionExchangeId id)
      throws IgniteCheckedException {
    GridDhtPartitionsFullMessage m =
        new GridDhtPartitionsFullMessage(id, lastVer.get(), id.topologyVersion());

    for (GridCacheContext cacheCtx : cctx.cacheContexts()) {
      if (!cacheCtx.isLocal()) {
        AffinityTopologyVersion startTopVer = cacheCtx.startTopologyVersion();

        boolean ready = startTopVer == null || startTopVer.compareTo(id.topologyVersion()) <= 0;

        if (ready)
          m.addFullPartitionsMap(cacheCtx.cacheId(), cacheCtx.topology().partitionMap(true));
      }
    }

    // It is important that client topologies be added after contexts.
    for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
      m.addFullPartitionsMap(top.cacheId(), top.partitionMap(true));

    if (log.isDebugEnabled())
      log.debug(
          "Sending full partition map [nodeIds="
              + F.viewReadOnly(nodes, F.node2id())
              + ", exchId="
              + exchId
              + ", msg="
              + m
              + ']');

    cctx.io().safeSend(nodes, m, SYSTEM_POOL, null);
  }
  /** {@inheritDoc} */
  @Override
  public final Map<UUID, GridNodeMetrics> metrics(Collection<UUID> nodeIds)
      throws GridSpiException {
    assert !F.isEmpty(nodeIds);

    long now = U.currentTimeMillis();

    Collection<UUID> expired = new LinkedList<>();

    for (UUID id : nodeIds) {
      GridNodeMetrics nodeMetrics = metricsMap.get(id);

      Long ts = tsMap.get(id);

      if (nodeMetrics == null || ts == null || ts < now - metricsExpireTime) expired.add(id);
    }

    if (!expired.isEmpty()) {
      Map<UUID, GridNodeMetrics> refreshed = metrics0(expired);

      for (UUID id : refreshed.keySet()) tsMap.put(id, now);

      metricsMap.putAll(refreshed);
    }

    return F.view(metricsMap, F.contains(nodeIds));
  }
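The check above treats a cached sample as stale when its timestamp is missing or older than now - metricsExpireTime. A minimal self-contained sketch of just that time-to-live test, using standard Java and illustrative field names rather than the SPI's actual ones:

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch of the expiry test above; the names and the 60s window are assumptions, not the SPI's. */
class MetricsExpirySketch {
  private final Map<UUID, Long> tsMap = new ConcurrentHashMap<UUID, Long>();
  private final long metricsExpireTime = 60000L;

  /** Returns {@code true} if the cached sample for the node is missing or too old. */
  boolean isExpired(UUID nodeId, long now) {
    Long ts = tsMap.get(nodeId);

    return ts == null || ts < now - metricsExpireTime;
  }
}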
Example #4
  /**
   * @param keys Keys.
   * @param topVer Topology version.
   * @return Nodes for the keys.
   */
  public Collection<GridNode> remoteNodes(Iterable<? extends K> keys, long topVer) {
    Collection<Collection<GridNode>> colcol = new GridLeanSet<>();

    for (K key : keys) colcol.add(nodes(key, topVer));

    return F.view(F.flatCollections(colcol), F.remoteNodes(cctx.localNodeId()));
  }
  /** @param node Node to remove. */
  public void removeMappedNode(GridNode node) {
    if (mappedDhtNodes.contains(node))
      mappedDhtNodes = new ArrayList<>(F.view(mappedDhtNodes, F.notEqualTo(node)));

    if (mappedNearNodes != null && mappedNearNodes.contains(node))
      mappedNearNodes = new ArrayList<>(F.view(mappedNearNodes, F.notEqualTo(node)));
  }
  /** @throws Exception If failed. */
  public void testCreateFileColocated() throws Exception {
    GridGgfsPath path = new GridGgfsPath("/colocated");

    UUID uuid = UUID.randomUUID();

    GridUuid affKey;

    long idx = 0;

    while (true) {
      affKey = new GridUuid(uuid, idx);

      if (grid(0).mapKeyToNode(DATA_CACHE_NAME, affKey).id().equals(grid(0).localNode().id()))
        break;

      idx++;
    }

    try (GridGgfsOutputStream out = fs.create(path, 1024, true, affKey, 0, 1024, null)) {
      // Write 15M, should be enough to test distribution.
      for (int i = 0; i < 15; i++) out.write(new byte[1024 * 1024]);
    }

    GridGgfsFile info = fs.info(path);

    Collection<GridGgfsBlockLocation> affNodes = fs.affinity(path, 0, info.length());

    assertEquals(1, affNodes.size());
    Collection<UUID> nodeIds = F.first(affNodes).nodeIds();

    assertEquals(1, nodeIds.size());
    assertEquals(grid(0).localNode().id(), F.first(nodeIds));
  }
Example #7
  @SuppressWarnings("unchecked")
  @Override
  public INetSystem<F, N, P, T, M> clone(Map<N, N> map) {
    INetSystem<F, N, P, T, M> clone = null;
    try {
      clone = (INetSystem<F, N, P, T, M>) NetSystem.class.newInstance();
    } catch (InstantiationException exception) {
      return null;
    } catch (IllegalAccessException exception) {
      return null;
    }

    // Clone places, recording the old-to-new node mapping.
    for (P p : this.getPlaces()) {
      P np = (P) p.clone();
      map.put((N) p, (N) np);
      clone.addPlace(np);
    }

    // Clone transitions, extending the same mapping.
    for (T t : this.getTransitions()) {
      T nt = (T) t.clone();
      map.put((N) t, (N) nt);
      clone.addTransition(nt);
    }

    // Recreate the flow relation between the cloned nodes.
    for (F f : this.getFlow()) {
      clone.addFlow(map.get(f.getSource()), map.get(f.getTarget()));
    }

    // Copy the current marking onto the cloned places.
    for (P p : this.getPlaces()) {
      clone.putTokens((P) map.get(p), this.getTokens(p));
    }

    return clone;
  }
Example #8
  /**
   * @param part Partition.
   * @param topVer Topology version.
   * @return Backup nodes.
   */
  public Collection<GridNode> backups(int part, long topVer) {
    Collection<GridNode> nodes = nodes(part, topVer);

    assert !F.isEmpty(nodes);

    if (nodes.size() <= 1) return Collections.emptyList();

    return F.view(nodes, F.notEqualTo(nodes.iterator().next()));
  }
  /** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to the primary node only. Since backup count is 0, the value must be
      // present on the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that we will see value in near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }
    /** @param nodes Nodes. */
    private Queue<ClusterNode> fallbacks(Collection<ClusterNode> nodes) {
      Queue<ClusterNode> fallbacks = new LinkedList<>();

      ClusterNode node = F.first(F.view(nodes, IS_LOC_NODE));

      if (node != null) fallbacks.add(node);

      fallbacks.addAll(node != null ? F.view(nodes, F.not(IS_LOC_NODE)) : nodes);

      return fallbacks;
    }
Example #11
  /**
   * Checks for explicit events configuration.
   *
   * @param ignite Grid instance.
   * @return {@code true} if all task events are explicitly specified in the configuration.
   */
  public static boolean checkExplicitTaskMonitoring(Ignite ignite) {
    int[] evts = ignite.configuration().getIncludeEventTypes();

    if (F.isEmpty(evts)) return false;

    for (int evt : VISOR_TASK_EVTS) {
      if (!F.contains(evts, evt)) return false;
    }

    return true;
  }
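For this check to succeed, every event type in VISOR_TASK_EVTS must appear in the node's configured include list. A hedged sketch of what such an explicit configuration typically looks like with the public Ignite API (assuming EVTS_TASK_EXECUTION covers the required task events, which may not exactly match VISOR_TASK_EVTS):

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.EventType;

public class ExplicitTaskEventsConfig {
  public static void main(String[] args) {
    IgniteConfiguration cfg = new IgniteConfiguration();

    // Events are not recorded by default, so the include list must be set explicitly;
    // otherwise checkExplicitTaskMonitoring(...) returns false.
    cfg.setIncludeEventTypes(EventType.EVTS_TASK_EXECUTION);

    try (Ignite ignite = Ignition.start(cfg)) {
      // checkExplicitTaskMonitoring(ignite) should now see a non-empty include list.
    }
  }
}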
Example #12
 /** {@inheritDoc} */
 @Override
 public void stopListenAsync(@Nullable GridInClosure<? super GridFuture<R>>... lsnr) {
   if (F.isEmpty(lsnr))
     synchronized (mux) {
       lsnrs.clear();
     }
   else
     synchronized (mux) {
       lsnrs.removeAll(F.asList(lsnr));
     }
 }
Example #13
    public static <F extends FileBase> F resolveFileType(
            final F file,
            final SortedMap<String, FileType> fileTypes
    ) {
        file.setFileType(resolve(
                new TreeMap<String, FileType>(file.getFileType()),
                fileTypes
        ));

        return file;
    }
  /**
   * @param cacheId Cache ID.
   * @return {@code True} if local client has been added.
   */
  public boolean isLocalClientAdded(int cacheId) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && F.eq(req.initiatingNodeId(), cctx.localNodeId())) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    return false;
  }
  /**
   * @param cacheId Cache ID to check.
   * @param topVer Topology version.
   * @return {@code True} if cache was added during this exchange.
   */
  public boolean isCacheAdded(int cacheId, AffinityTopologyVersion topVer) {
    if (!F.isEmpty(reqs)) {
      for (DynamicCacheChangeRequest req : reqs) {
        if (req.start() && !req.clientStartOnly()) {
          if (CU.cacheId(req.cacheName()) == cacheId) return true;
        }
      }
    }

    GridCacheContext<?, ?> cacheCtx = cctx.cacheContext(cacheId);

    return cacheCtx != null && F.eq(cacheCtx.startTopologyVersion(), topVer);
  }
Example #16
  @Override
  public String toDOT() {
    String result = "digraph G {\n";
    result +=
        "graph [fontname=\"Helvetica\" fontsize=\"10\" nodesep=\"0.35\" ranksep=\"0.25 equally\"];\n";
    result +=
        "node [fontname=\"Helvetica\" fontsize=\"10\" fixedsize=\"true\" style=\"filled\" fillcolor=\"white\" penwidth=\"2\"];\n";
    result +=
        "edge [fontname=\"Helvetica\" fontsize=\"10\" arrowhead=\"normal\" color=\"black\"];\n";
    result += "\n";
    result += "node [shape=circle];\n";

    for (P p : this.getPlaces()) {
      Integer n = this.marking.get(p);
      String label =
          ((n == 0) || (n == null)) ? p.getLabel() : p.getLabel() + "[" + n.toString() + "]";
      result +=
          String.format(
              "\tn%s[label=\"%s\" width=\".3\" height=\".3\"];\n",
              p.getId().replace("-", ""), label);
    }

    result += "\n";
    result += "node [shape=box];\n";

    for (T t : this.getTransitions()) {
      String fillColor = this.isEnabled(t) ? " fillcolor=\"#9ACD32\"" : "";
      if (t.isSilent())
        result +=
            String.format(
                "\tn%s[label=\"\" width=\".3\"" + fillColor + " height=\".1\"];\n",
                t.getId().replace("-", ""));
      else
        result +=
            String.format(
                "\tn%s[label=\"%s\" width=\".3\"" + fillColor + " height=\".3\"];\n",
                t.getId().replace("-", ""),
                t.getLabel());
    }

    result += "\n";
    for (F f : this.getFlow()) {
      result +=
          String.format(
              "\tn%s->n%s;\n",
              f.getSource().getId().replace("-", ""), f.getTarget().getId().replace("-", ""));
    }
    result += "}\n";

    return result;
  }
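For reference, the method above emits plain Graphviz DOT text. For a hypothetical net with one unmarked place p1, one non-silent (and currently disabled) transition t1, and a single flow p1 -> t1, with ids equal to labels, the output would look roughly like this:

digraph G {
graph [fontname="Helvetica" fontsize="10" nodesep="0.35" ranksep="0.25 equally"];
node [fontname="Helvetica" fontsize="10" fixedsize="true" style="filled" fillcolor="white" penwidth="2"];
edge [fontname="Helvetica" fontsize="10" arrowhead="normal" color="black"];

node [shape=circle];
	np1[label="p1" width=".3" height=".3"];

node [shape=box];
	nt1[label="t1" width=".3" height=".3"];

	np1->nt1;
}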
  /** @throws Exception If failed. */
  public void testTopologyListener() throws Exception {
    final Collection<UUID> added = new ArrayList<>(1);
    final Collection<UUID> rmvd = new ArrayList<>(1);

    final CountDownLatch addedLatch = new CountDownLatch(1);
    final CountDownLatch rmvLatch = new CountDownLatch(1);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    GridClientTopologyListener lsnr =
        new GridClientTopologyListener() {
          @Override
          public void onNodeAdded(GridClientNode node) {
            added.add(node.nodeId());

            addedLatch.countDown();
          }

          @Override
          public void onNodeRemoved(GridClientNode node) {
            rmvd.add(node.nodeId());

            rmvLatch.countDown();
          }
        };

    client.addTopologyListener(lsnr);

    try {
      Grid g = startGrid(NODES_CNT + 1);

      UUID id = g.localNode().id();

      assertTrue(addedLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

      assertEquals(1, added.size());
      assertEquals(id, F.first(added));

      stopGrid(NODES_CNT + 1);

      assertTrue(rmvLatch.await(2 * TOP_REFRESH_FREQ, MILLISECONDS));

      assertEquals(1, rmvd.size());
      assertEquals(id, F.first(rmvd));
    } finally {
      client.removeTopologyListener(lsnr);

      stopGrid(NODES_CNT + 1);
    }
  }
 /** {@inheritDoc} */
 @Override
 public GridDeployment getDeployment(final GridUuid ldrId) {
   synchronized (mux) {
     return F.find(
         F.flat(cache.values()),
         null,
         new P1<SharedDeployment>() {
           @Override
           public boolean apply(SharedDeployment d) {
             return d.classLoaderId().equals(ldrId);
           }
         });
   }
 }
Example #19
  /**
   * Executes example.
   *
   * @param args Command line arguments, none required.
   */
  public static void main(String[] args) {
    // Typedefs:
    // ---------
    // G -> GridFactory
    // CI1 -> GridInClosure
    // CO -> GridOutClosure
    // CA -> GridAbsClosure
    // F -> GridFunc

    // Data initialisation.
    Random rand = new Random();

    final int size = 20;

    Collection<Integer> nums = new ArrayList<Integer>(size);

    // Generate list of random integers.
    for (int i = 0; i < size; i++) {
      nums.add(rand.nextInt(size));
    }

    // Print generated list.
    X.println("Generated list:");

    F.forEach(nums, F.<Integer>print("", " "));

    // Get a new unmodifiable collection whose elements are lower than half of the generated list size.
    Collection<Integer> view =
        F.view(
            nums,
            new P1<Integer>() {
              @Override
              public boolean apply(Integer i) {
                return i < size / 2;
              }
            });

    // Print result.
    X.println("\nResult list:");

    F.forEach(view, F.<Integer>print("", " "));

    // Check for read only.
    try {
      view.add(12);
    } catch (Exception ignored) {
      X.println("\nView is read only.");
    }
  }
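The read-only check at the end relies on F.view returning an unmodifiable view of the matching elements. A rough standard-library analog, written against plain java.util (note it copies eagerly, whereas F.view is a light-weight view over the original collection):

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;

public class ReadOnlyFilterSketch {
  public static void main(String[] args) {
    int size = 20;

    Collection<Integer> nums = new ArrayList<Integer>();
    Collections.addAll(nums, 3, 19, 14, 1, 7);

    // Eagerly copy the elements below half the size (F.view would filter lazily instead).
    Collection<Integer> filtered = new ArrayList<Integer>();

    for (Integer i : nums) if (i < size / 2) filtered.add(i);

    // Wrap the copy so callers cannot modify it, mirroring the read-only check above.
    Collection<Integer> view = Collections.unmodifiableCollection(filtered);

    try {
      view.add(12);
    } catch (UnsupportedOperationException ignored) {
      System.out.println("View is read only.");
    }
  }
}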
  /** {@inheritDoc} */
  @Override
  public Collection<ClusterNode> nodes(int p, AffinityTopologyVersion topVer) {
    Collection<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

    lock.readLock().lock();

    try {
      assert node2part != null && node2part.valid()
          : "Invalid node-to-partitions map [topVer1="
              + topVer
              + ", topVer2="
              + this.topVer
              + ", cache="
              + cctx.name()
              + ", node2part="
              + node2part
              + ']';

      Collection<ClusterNode> nodes = null;

      Collection<UUID> nodeIds = part2node.get(p);

      if (!F.isEmpty(nodeIds)) {
        Collection<UUID> affIds = new HashSet<>(F.viewReadOnly(affNodes, F.node2id()));

        for (UUID nodeId : nodeIds) {
          if (!affIds.contains(nodeId) && hasState(p, nodeId, OWNING, MOVING, RENTING)) {
            ClusterNode n = cctx.discovery().node(nodeId);

            if (n != null
                && (topVer.topologyVersion() < 0 || n.order() <= topVer.topologyVersion())) {
              if (nodes == null) {
                nodes = new ArrayList<>(affNodes.size() + 2);

                nodes.addAll(affNodes);
              }

              nodes.add(n);
            }
          }
        }
      }

      return nodes != null ? nodes : affNodes;
    } finally {
      lock.readLock().unlock();
    }
  }
  /**
   * @param rmtReducer Optional reducer.
   * @param rmtTransform Optional transformer.
   * @param args Arguments.
   * @return Future.
   */
  @SuppressWarnings("IfMayBeConditional")
  private <R> GridCacheQueryFuture<R> execute(
      @Nullable GridReducer<T, R> rmtReducer,
      @Nullable GridClosure<T, R> rmtTransform,
      @Nullable Object... args) {
    Collection<GridNode> nodes = nodes();

    cctx.checkSecurity(GridSecurityPermission.CACHE_READ);

    if (F.isEmpty(nodes))
      return new GridCacheQueryErrorFuture<>(
          cctx.kernalContext(),
          new GridEmptyProjectionException("There are no data nodes for cache: " + cctx.namexx()));

    if (log.isDebugEnabled())
      log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');

    if (cctx.deploymentEnabled()) {
      try {
        cctx.deploy().registerClasses(filter, rmtReducer, rmtTransform);
        cctx.deploy().registerClasses(args);
      } catch (GridException e) {
        return new GridCacheQueryErrorFuture<>(cctx.kernalContext(), e);
      }
    }

    if (subjId == null) subjId = cctx.localNodeId();

    taskHash = cctx.kernalContext().job().currentTaskNameHash();

    GridCacheQueryBean bean =
        new GridCacheQueryBean(
            this,
            (GridReducer<Object, Object>) rmtReducer,
            (GridClosure<Object, Object>) rmtTransform,
            args);

    GridCacheQueryManager qryMgr = cctx.queries();

    boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());

    if (type == SQL_FIELDS)
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryFieldsLocal(bean) : qryMgr.queryFieldsDistributed(bean, nodes));
    else
      return (GridCacheQueryFuture<R>)
          (loc ? qryMgr.queryLocal(bean) : qryMgr.queryDistributed(bean, nodes));
  }
Example #22
  /**
   * @param ctx Kernal context.
   * @param cfg Ignite configuration.
   * @param providers Plugin providers.
   */
  @SuppressWarnings("TypeMayBeWeakened")
  public IgnitePluginProcessor(
      GridKernalContext ctx, IgniteConfiguration cfg, List<PluginProvider> providers) {
    super(ctx);

    ExtensionRegistryImpl registry = new ExtensionRegistryImpl();

    for (PluginProvider provider : providers) {
      GridPluginContext pluginCtx = new GridPluginContext(ctx, cfg);

      if (F.isEmpty(provider.name())) throw new IgniteException("Plugin name can not be empty.");

      if (plugins.containsKey(provider.name()))
        throw new IgniteException("Duplicated plugin name: " + provider.name());

      plugins.put(provider.name(), provider);

      pluginCtxMap.put(provider, pluginCtx);

      provider.initExtensions(pluginCtx, registry);

      if (provider.plugin() == null) throw new IgniteException("Plugin is null.");
    }

    extensions = registry.createExtensionMap();
  }
Example #23
  /**
   * @param mapping Mapping to order.
   * @param committedVers Committed versions.
   * @param rolledbackVers Rolled back versions.
   */
  void orderCompleted(
      GridDistributedTxMapping<K, V> mapping,
      Collection<GridCacheVersion> committedVers,
      Collection<GridCacheVersion> rolledbackVers) {
    for (GridCacheTxEntry<K, V> txEntry : F.concat(false, mapping.reads(), mapping.writes())) {
      while (true) {
        GridDistributedCacheEntry<K, V> entry = (GridDistributedCacheEntry<K, V>) txEntry.cached();

        try {
          // Handle explicit locks.
          GridCacheVersion base =
              txEntry.explicitVersion() != null ? txEntry.explicitVersion() : xidVer;

          entry.doneRemote(xidVer, base, committedVers, rolledbackVers);

          if (ec()) entry.recheck();

          break;
        } catch (GridCacheEntryRemovedException ignored) {
          assert entry.obsoleteVersion() != null;

          if (log.isDebugEnabled())
            log.debug(
                "Replacing obsolete entry in remote transaction [entry="
                    + entry
                    + ", tx="
                    + this
                    + ']');

          // Replace the entry.
          txEntry.cached(cctx.cache().entryEx(txEntry.key()), entry.keyBytes());
        }
      }
    }
  }
  /**
   * @param cctx Cache context.
   * @param prj Projection (optional).
   * @param part Partition (optional).
   * @return Collection of data nodes in provided projection (if any).
   */
  private static Collection<ClusterNode> nodes(
      final GridCacheContext<?, ?> cctx,
      @Nullable final ClusterGroup prj,
      @Nullable final Integer part) {
    assert cctx != null;

    final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();

    Collection<ClusterNode> affNodes = CU.affinityNodes(cctx);

    if (prj == null && part == null) return affNodes;

    final Set<ClusterNode> owners =
        part == null
            ? Collections.<ClusterNode>emptySet()
            : new HashSet<>(cctx.topology().owners(part, topVer));

    return F.view(
        affNodes,
        new P1<ClusterNode>() {
          @Override
          public boolean apply(ClusterNode n) {
            return cctx.discovery().cacheAffinityNode(n, cctx.name())
                && (prj == null || prj.node(n.id()) != null)
                && (part == null || owners.contains(n));
          }
        });
  }
Example #25
  /** {@inheritDoc} */
  @Override
  public boolean apply(@Nullable T1 t1, @Nullable T2 t2) {
    lazyCompile();

    if (expr != null) {
      JexlContext ctx = new MapContext();

      ctx.set(var1, t1);
      ctx.set(var2, t2);

      for (Map.Entry<String, Object> e : map.entrySet()) {
        ctx.set(e.getKey(), e.getValue());
      }

      try {
        Object obj = expr.evaluate(ctx);

        if (obj instanceof Boolean) {
          return (Boolean) obj;
        }
      } catch (Exception ex) {
        throw F.wrap(ex);
      }
    }

    return false;
  }
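The predicate above evaluates a pre-compiled Apache Commons JEXL expression against a MapContext that carries the two free variables plus any extra bindings. A minimal standalone sketch of that evaluation flow (JEXL 2 API; the expression string and variable names here are illustrative):

import org.apache.commons.jexl2.Expression;
import org.apache.commons.jexl2.JexlContext;
import org.apache.commons.jexl2.JexlEngine;
import org.apache.commons.jexl2.MapContext;

public class JexlPredicateSketch {
  public static void main(String[] args) {
    JexlEngine jexl = new JexlEngine();

    // Compile once, evaluate many times - the same idea as lazyCompile() above.
    Expression expr = jexl.createExpression("t1.length() > t2.length()");

    JexlContext ctx = new MapContext();

    ctx.set("t1", "longer");
    ctx.set("t2", "short");

    Object res = expr.evaluate(ctx);

    // Non-Boolean results are treated as 'false', as in the predicate above.
    System.out.println(res instanceof Boolean && (Boolean) res); // prints: true
  }
}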
  /** {@inheritDoc} */
  @Override
  public V unswap(K key) throws GridException {
    ctx.denyOnFlags(F.asList(READ, SKIP_SWAP));

    // Unswap only from DHT. Near cache does not have swap storage.
    return dht.unswap(key);
  }
  /** {@inheritDoc} */
  @Override
  public Map<K, V> peekAll(
      @Nullable Collection<? extends K> keys, @Nullable Collection<GridCachePeekMode> modes)
      throws GridException {
    if (keys == null || keys.isEmpty()) return emptyMap();

    final Collection<K> skipped = new GridLeanSet<K>();

    final Map<K, V> map =
        !modes.contains(PARTITIONED_ONLY)
            ? peekAll0(keys, modes, ctx.tm().localTxx(), skipped)
            : new GridLeanMap<K, V>(0);

    if (map.size() != keys.size() && !modes.contains(NEAR_ONLY)) {
      map.putAll(
          dht.peekAll(
              F.view(
                  keys,
                  new P1<K>() {
                    @Override
                    public boolean apply(K k) {
                      return !map.containsKey(k) && !skipped.contains(k);
                    }
                  }),
              modes));
    }

    return map;
  }
Example #28
  /**
   * Prints every word of a phrase on different nodes.
   *
   * @param phrase Phrase from which to print words on different nodes.
   * @throws GridException If failed.
   */
  private static void spreadWordsClosure(String phrase) throws GridException {
    X.println(">>> Starting spreadWordsClosure() example...");

    // Split the passed-in phrase into words and print every word
    // on an individual grid node. If there are more words than nodes,
    // some nodes will print more than one word.
    G.grid()
        .run(
            SPREAD,
            F.yield(
                phrase.split(" "),
                new GridInClosure<String>() {
                  @Override
                  public void apply(String word) {
                    X.println(word);
                  }
                }));

    // NOTE:
    //
    // Alternatively, you can use existing closure 'F.println()' to
    // print any yield result in 'F.yield()' like so:
    //
    // G.grid().run(SPREAD, F.yield(phrase.split(" "), F.println()));
    //

    X.println(">>>");
    X.println(
        ">>> Finished printing individual words on different nodes based on GridGain 3.0 API.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
  }
Example #29
  /**
   * Prints a phrase on the grid nodes running anonymous closure objects and calculating total
   * number of letters.
   *
   * @param phrase Phrase to print on the grid nodes.
   * @throws GridException If failed.
   */
  private static void countLettersClosure(String phrase) throws GridException {
    X.println(">>> Starting countLettersClosure() example...");

    // Explicitly execute the collection of callable objects and receive a result.
    Collection<Integer> results =
        G.grid()
            .call(
                SPREAD,
                new GridClosure<String, Integer>() { // Create executable logic.
                  @Override
                  public Integer apply(String word) {
                    // Print out a given word, just so we can
                    // see which node is doing what.
                    X.println(">>> Executing word: " + word);

                    // Return the length of a given word, i.e. number of letters.
                    return word.length();
                  }
                },
                Arrays.asList(phrase.split(" "))); // Collection of arguments for closures.

    // Add up all results using convenience 'sum()' method.
    int letterCnt = F.sum(results);

    X.println(">>>");
    X.println(">>> Finished execution of counting letters with closure based on GridGain 3.0 API.");
    X.println(">>> You should see the phrase '" + phrase + "' printed out on the nodes.");
    X.println(">>> Total number of letters in the phrase is '" + letterCnt + "'.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
  }
Example #30
  /**
   * Prints a phrase on the grid nodes running anonymous callable objects and calculating total
   * number of letters.
   *
   * @param phrase Phrase to print on the grid nodes.
   * @throws GridException If failed.
   */
  private static void countLettersCallable(String phrase) throws GridException {
    X.println(">>> Starting countLettersCallable() example...");

    Collection<Callable<Integer>> calls = new HashSet<Callable<Integer>>();

    for (final String word : phrase.split(" "))
      calls.add(
          new GridCallable<Integer>() { // Create executable logic.
            @Override
            public Integer call() throws Exception {
              // Print out a given word, just so we can
              // see which node is doing what.
              X.println(">>> Executing word: " + word);

              // Return the length of a given word, i.e. number of letters.
              return word.length();
            }
          });

    // Explicitly execute the collection of callable objects and receive a result.
    Collection<Integer> results = G.grid().call(SPREAD, calls);

    // Add up all results using convenience 'sum()' method on GridFunc class.
    int letterCnt = F.sum(results);

    X.println(">>>");
    X.println(
        ">>> Finished execution of counting letters with callables based on GridGain 3.0 API.");
    X.println(">>> You should see the phrase '" + phrase + "' printed out on the nodes.");
    X.println(">>> Total number of letters in the phrase is '" + letterCnt + "'.");
    X.println(">>> Check all nodes for output (this node is also part of the grid).");
    X.println(">>>");
  }
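The same scatter-and-sum pattern can be reproduced locally with a plain ExecutorService, which separates the callable mechanics from the grid distribution. A rough local-only sketch (no GridGain involved; word lengths are summed within a single JVM):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CountLettersLocal {
  public static void main(String[] args) throws Exception {
    String phrase = "Count characters using callables";

    Collection<Callable<Integer>> calls = new ArrayList<Callable<Integer>>();

    // One callable per word, mirroring the grid version above.
    for (final String word : phrase.split(" "))
      calls.add(
          new Callable<Integer>() {
            @Override
            public Integer call() {
              return word.length();
            }
          });

    ExecutorService exec = Executors.newFixedThreadPool(4);

    try {
      // invokeAll() plays the role of G.grid().call(SPREAD, calls), but on local threads.
      List<Future<Integer>> results = exec.invokeAll(calls);

      int letterCnt = 0;

      for (Future<Integer> f : results) letterCnt += f.get();

      System.out.println("Total number of letters in the phrase is '" + letterCnt + "'.");
    } finally {
      exec.shutdown();
    }
  }
}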