    /** {@inheritDoc} */
    @Override
    public Serializable reduce(List<GridComputeJobResult> results) throws GridException {
      if (log.isInfoEnabled()) log.info("Reducing job [job=" + this + ", results=" + results + ']');

      if (results.size() > 1) fail();

      return results.get(0).getData();
    }
  /**
   * Increases the priority of a job that has been bumped down in the wait queue.
   *
   * @param waitJobs Ordered collection of collision contexts for jobs that are currently waiting
   *     for execution.
   * @param passiveJobs Reordered collection of collision contexts for waiting jobs.
   */
  private void bumpPriority(
      Collection<GridCollisionJobContext> waitJobs, List<GridCollisionJobContext> passiveJobs) {
    assert waitJobs != null;
    assert passiveJobs != null;
    assert waitJobs.size() == passiveJobs.size();

    for (int i = 0; i < passiveJobs.size(); i++) {
      GridCollisionJobContext ctx = passiveJobs.get(i);

      if (i > indexOf(waitJobs, ctx))
        ctx.getJobContext().setAttribute(jobAttrKey, getJobPriority(ctx) + starvationInc);
    }
  }
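/*
 * Illustration (not from the original source): a minimal sketch of the "bumped down" test used by
 * bumpPriority() above. A job earns the starvation increment only when its index after the
 * priority sort is greater than its index in the original wait queue. Plain strings stand in for
 * GridCollisionJobContext; all names here are hypothetical.
 */
import java.util.Arrays;
import java.util.List;

class BumpDetectionDemo {
  public static void main(String[] args) {
    List<String> waitOrder = Arrays.asList("jobA", "jobB", "jobC");
    List<String> sortedOrder = Arrays.asList("jobB", "jobC", "jobA"); // Order after the sort.

    for (int i = 0; i < sortedOrder.size(); i++) {
      String job = sortedOrder.get(i);

      // Bumped down: the job sits lower (larger index) after sorting than it did before.
      if (i > waitOrder.indexOf(job))
        System.out.println(job + " was bumped down and would get the priority increment.");
    }
  }
}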
  /**
   * Gets values referenced by sequential keys, e.g. {@code key1...keyN}.
   *
   * @param keyPrefix Key prefix, e.g. {@code key} for {@code key1...keyN}.
   * @param params Parameters map.
   * @return Values.
   */
  @Nullable
  protected List<Object> values(String keyPrefix, Map<String, Object> params) {
    assert keyPrefix != null;

    List<Object> vals = new LinkedList<>();

    for (int i = 1; ; i++) {
      String key = keyPrefix + i;

      if (params.containsKey(key)) vals.add(params.get(key));
      else break;
    }

    return vals;
  }
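/*
 * Illustration (not from the original source): the sequential-key convention values() relies on.
 * Keys key1, key2, ... are probed in order and collection stops at the first missing index, so a
 * gap hides everything after it. All names here are hypothetical.
 */
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

class SequentialKeysDemo {
  public static void main(String[] args) {
    Map<String, Object> params = new HashMap<>();

    params.put("k1", "a");
    params.put("k2", "b");
    params.put("k4", "d"); // Gap at k3: this entry is never reached.

    List<Object> vals = new LinkedList<>();

    // Same loop as values("k", params).
    for (int i = 1; ; i++) {
      String key = "k" + i;

      if (params.containsKey(key)) vals.add(params.get(key));
      else break;
    }

    System.out.println(vals); // [a, b]
  }
}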
  /** {@inheritDoc} */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid, Integer arg)
      throws GridException {
    assert taskSes != null;
    assert arg != null;
    assert arg > 0;

    Map<GridSessionLoadTestJob, GridNode> map = new HashMap<>(subgrid.size());

    Iterator<GridNode> iter = subgrid.iterator();

    Random rnd = new Random();

    params = new HashMap<>(arg);

    Collection<UUID> assigned = new ArrayList<>(subgrid.size());

    for (int i = 0; i < arg; i++) {
      // Recycle iterator.
      if (!iter.hasNext()) iter = subgrid.iterator();

      String paramName = UUID.randomUUID().toString();

      int paramVal = rnd.nextInt();

      taskSes.setAttribute(paramName, paramVal);

      GridNode node = iter.next();

      assigned.add(node.id());

      map.put(new GridSessionLoadTestJob(paramName), node);

      params.put(paramName, paramVal);

      if (log.isDebugEnabled())
        log.debug("Set session attribute [name=" + paramName + ", value=" + paramVal + ']');
    }

    taskSes.setAttribute("nodes", assigned);

    return map;
  }
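/*
 * Illustration (not from the original source): the iterator-recycling trick map() uses above
 * yields a simple round-robin assignment when there are more jobs than nodes. Plain strings stand
 * in for GridNode; all names here are hypothetical.
 */
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

class RoundRobinDemo {
  public static void main(String[] args) {
    List<String> nodes = Arrays.asList("node-A", "node-B", "node-C");

    Iterator<String> iter = nodes.iterator();

    // Seven jobs over three nodes: A B C A B C A.
    for (int job = 0; job < 7; job++) {
      // Recycle iterator once every node has been used.
      if (!iter.hasNext()) iter = nodes.iterator();

      System.out.println("job-" + job + " -> " + iter.next());
    }
  }
}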
  /** {@inheritDoc} */
  @SuppressWarnings("BusyWait")
  @Override
  public Boolean reduce(List<GridComputeJobResult> results) throws GridException {
    assert taskSes != null;
    assert results != null;
    assert params != null;
    assert !params.isEmpty();
    assert results.size() == params.size();

    Map<String, Integer> receivedParams = new HashMap<>();

    boolean allAttrReceived = false;

    int cnt = 0;

    while (!allAttrReceived && cnt++ < 3) {
      allAttrReceived = true;

      for (Map.Entry<String, Integer> entry : params.entrySet()) {
        assert taskSes.getAttribute(entry.getKey()) != null;

        Integer newVal = (Integer) taskSes.getAttribute(entry.getKey());

        assert newVal != null;

        receivedParams.put(entry.getKey(), newVal);

        if (newVal != entry.getValue() + 1) allAttrReceived = false;
      }

      if (!allAttrReceived) {
        try {
          Thread.sleep(100);
        } catch (InterruptedException e) {
          throw new GridException("Thread interrupted.", e);
        }
      }
    }

    if (log.isDebugEnabled()) {
      for (Map.Entry<String, Integer> entry : receivedParams.entrySet()) {
        log.debug(
            "Received session attr value [name="
                + entry.getKey()
                + ", val="
                + entry.getValue()
                + ", expected="
                + (params.get(entry.getKey()) + 1)
                + ']');
      }
    }

    return allAttrReceived;
  }
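/*
 * Illustration (not from the original source): the bounded-poll pattern reduce() uses above - at
 * most three passes, sleeping 100 ms between unsuccessful ones. The allUpdatesVisible() check is a
 * hypothetical stand-in for the session attribute comparison.
 */
class BoundedPollDemo {
  /** Pretends the updates only become visible on the third pass. */
  private static boolean allUpdatesVisible(int attempt) {
    return attempt >= 3;
  }

  public static void main(String[] args) throws InterruptedException {
    boolean allReceived = false;
    int cnt = 0;

    while (!allReceived && cnt++ < 3) {
      allReceived = allUpdatesVisible(cnt);

      if (!allReceived) Thread.sleep(100);
    }

    System.out.println(allReceived ? "All attributes received." : "Gave up after three passes.");
  }
}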
  /**
   * Creates REST request.
   *
   * @param cmd Command.
   * @param params Parameters.
   * @param req Servlet request.
   * @return REST request.
   * @throws GridException If creation failed.
   */
  @Nullable
  private GridRestRequest createRequest(
      GridRestCommand cmd, Map<String, Object> params, ServletRequest req) throws GridException {
    GridRestRequest restReq;

    switch (cmd) {
      case CACHE_GET:
      case CACHE_GET_ALL:
      case CACHE_PUT:
      case CACHE_PUT_ALL:
      case CACHE_REMOVE:
      case CACHE_REMOVE_ALL:
      case CACHE_ADD:
      case CACHE_CAS:
      case CACHE_METRICS:
      case CACHE_REPLACE:
      case CACHE_DECREMENT:
      case CACHE_INCREMENT:
      case CACHE_APPEND:
      case CACHE_PREPEND:
        {
          GridRestCacheRequest restReq0 = new GridRestCacheRequest();

          restReq0.cacheName((String) params.get("cacheName"));
          restReq0.key(params.get("key"));
          restReq0.value(params.get("val"));
          restReq0.value2(params.get("val2"));

          Object val1 = params.get("val1");

          if (val1 != null) restReq0.value(val1);

          restReq0.cacheFlags(intValue("cacheFlags", params, 0));
          restReq0.ttl(longValue("exp", params, null));
          restReq0.initial(longValue("init", params, null));
          restReq0.delta(longValue("delta", params, null));

          if (cmd == CACHE_GET_ALL || cmd == CACHE_PUT_ALL || cmd == CACHE_REMOVE_ALL) {
            List<Object> keys = values("k", params);
            List<Object> vals = values("v", params);

            if (keys.size() < vals.size())
              throw new GridException(
                  "Number of keys must be greater than or equal to the number of values.");

            Map<Object, Object> map = U.newHashMap(keys.size());

            Iterator<Object> keyIt = keys.iterator();
            Iterator<Object> valIt = vals.iterator();

            while (keyIt.hasNext()) map.put(keyIt.next(), valIt.hasNext() ? valIt.next() : null);

            restReq0.values(map);
          }

          restReq = restReq0;

          break;
        }

      case TOPOLOGY:
      case NODE:
        {
          GridRestTopologyRequest restReq0 = new GridRestTopologyRequest();

          restReq0.includeMetrics(Boolean.parseBoolean((String) params.get("mtr")));
          restReq0.includeAttributes(Boolean.parseBoolean((String) params.get("attr")));

          restReq0.nodeIp((String) params.get("ip"));

          restReq0.nodeId(uuidValue("id", params));

          restReq = restReq0;

          break;
        }

      case EXE:
      case RESULT:
      case NOOP:
        {
          GridRestTaskRequest restReq0 = new GridRestTaskRequest();

          restReq0.taskId((String) params.get("id"));
          restReq0.taskName((String) params.get("name"));

          restReq0.params(values("p", params));

          restReq0.async(Boolean.parseBoolean((String) params.get("async")));

          restReq0.timeout(longValue("timeout", params, 0L));

          restReq = restReq0;

          break;
        }

      case LOG:
        {
          GridRestLogRequest restReq0 = new GridRestLogRequest();

          restReq0.path((String) params.get("path"));

          restReq0.from(intValue("from", params, -1));
          restReq0.to(intValue("to", params, -1));

          restReq = restReq0;

          break;
        }

      case VERSION:
        {
          restReq = new GridRestRequest();

          break;
        }

      default:
        throw new GridException("Invalid command: " + cmd);
    }

    restReq.address(new InetSocketAddress(req.getRemoteAddr(), req.getRemotePort()));

    restReq.command(cmd);

    if (params.containsKey("gridgain.login") || params.containsKey("gridgain.password")) {
      GridSecurityCredentials cred =
          new GridSecurityCredentials(
              (String) params.get("gridgain.login"), (String) params.get("gridgain.password"));

      restReq.credentials(cred);
    }

    String clientId = (String) params.get("clientId");

    try {
      if (clientId != null) restReq.clientId(UUID.fromString(clientId));
    } catch (Exception ignored) {
      // Ignore an invalid client ID; the REST handler will deal with it.
    }

    String destId = (String) params.get("destId");

    try {
      if (destId != null) restReq.destinationId(UUID.fromString(destId));
    } catch (IllegalArgumentException ignored) {
      // Don't fail - try to execute locally.
    }

    String sesTokStr = (String) params.get("sessionToken");

    try {
      if (sesTokStr != null) restReq.sessionToken(U.hexString2ByteArray(sesTokStr));
    } catch (IllegalArgumentException ignored) {
      // Ignore invalid session token.
    }

    return restReq;
  }
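/*
 * Illustration (not from the original source): how the multi-key cache commands above pair
 * k1...kN with v1...vN. Unmatched keys map to null, which is why more keys than values is allowed
 * but not the reverse. All names here are hypothetical.
 */
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

class KeyValueZipDemo {
  public static void main(String[] args) {
    List<Object> keys = Arrays.asList("k1", "k2", "k3");
    List<Object> vals = Arrays.asList("v1", "v2"); // Fewer values than keys is fine.

    Map<Object, Object> map = new HashMap<>(keys.size());

    Iterator<Object> keyIt = keys.iterator();
    Iterator<Object> valIt = vals.iterator();

    // Same pairing as createRequest(): pad missing values with null.
    while (keyIt.hasNext()) map.put(keyIt.next(), valIt.hasNext() ? valIt.next() : null);

    System.out.println(map); // e.g. {k1=v1, k2=v2, k3=null} (iteration order not guaranteed)
  }
}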
  /** {@inheritDoc} */
  @Override
  public void onCollision(
      Collection<GridCollisionJobContext> waitJobs,
      Collection<GridCollisionJobContext> activeJobs) {
    assert waitJobs != null;
    assert activeJobs != null;

    int activeSize = F.size(activeJobs, RUNNING_JOBS);

    waitingCnt.set(waitJobs.size());
    runningCnt.set(activeSize);
    heldCnt.set(activeJobs.size() - activeSize);

    int waitSize = waitJobs.size();

    int activateCnt = parallelJobsNum - activeSize;

    if (activateCnt > 0 && !waitJobs.isEmpty()) {
      if (waitJobs.size() <= activateCnt) {
        for (GridCollisionJobContext waitJob : waitJobs) {
          waitJob.activate();

          waitSize--;
        }
      } else {
        List<GridCollisionJobContext> passiveList =
            new ArrayList<GridCollisionJobContext>(waitJobs);

        Collections.sort(
            passiveList,
            new Comparator<GridCollisionJobContext>() {
              /** {@inheritDoc} */
              @Override
              public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
                int p1 = getJobPriority(o1);
                int p2 = getJobPriority(o2);

                return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
              }
            });

        if (preventStarvation) bumpPriority(waitJobs, passiveList);

        for (int i = 0; i < activateCnt; i++) {
          passiveList.get(i).activate();

          waitSize--;
        }
      }
    }

    if (waitSize > waitJobsNum) {
      List<GridCollisionJobContext> waitList = new ArrayList<GridCollisionJobContext>(waitJobs);

      // Put jobs with highest priority first.
      Collections.sort(
          waitList,
          new Comparator<GridCollisionJobContext>() {
            /** {@inheritDoc} */
            @Override
            public int compare(GridCollisionJobContext o1, GridCollisionJobContext o2) {
              int p1 = getJobPriority(o1);
              int p2 = getJobPriority(o2);

              return p1 < p2 ? 1 : p1 == p2 ? 0 : -1;
            }
          });

      int skip = waitJobs.size() - waitSize;

      int i = 0;

      for (GridCollisionJobContext waitCtx : waitList) {
        if (++i >= skip) {
          waitCtx.cancel();

          if (--waitSize <= waitJobsNum) break;
        }
      }
    }
  }
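/*
 * Illustration (not from the original source): both sorts in onCollision() above order contexts by
 * descending priority, i.e. highest priority first. The same ordering on plain integers, using
 * Comparator.reverseOrder() in place of the hand-written comparator.
 */
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

class PrioritySortDemo {
  public static void main(String[] args) {
    List<Integer> priorities = new ArrayList<>(Arrays.asList(3, 7, 1, 7, 5));

    // Highest priority first; equal priorities keep their relative order (the sort is stable).
    priorities.sort(Comparator.reverseOrder());

    System.out.println(priorities); // [7, 7, 5, 3, 1]
  }
}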
Example #8
  /**
   * Performs flush.
   *
   * @throws GridException If failed.
   */
  private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
      if (!f.isDone()) {
        if (activeFuts0 == null) activeFuts0 = new ArrayList<>((int) (activeFuts.size() * 1.2));

        activeFuts0.add(f);
      } else {
        f.get();

        doneCnt++;
      }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty()) return;

    while (true) {
      Queue<GridFuture<?>> q = null;

      for (Buffer buf : bufMappings.values()) {
        GridFuture<?> flushFut = buf.flush();

        if (flushFut != null) {
          if (q == null) q = new ArrayDeque<>(bufMappings.size() * 2);

          q.add(flushFut);
        }
      }

      if (q != null) {
        assert !q.isEmpty();

        boolean err = false;

        for (GridFuture fut = q.poll(); fut != null; fut = q.poll()) {
          try {
            fut.get();
          } catch (GridException e) {
            if (log.isDebugEnabled()) log.debug("Failed to flush buffer: " + e);

            err = true;
          }
        }

        if (err)
          // Remaps needed - flush buffers.
          continue;
      }

      doneCnt = 0;

      for (int i = 0; i < activeFuts0.size(); i++) {
        GridFuture f = activeFuts0.get(i);

        if (f == null) doneCnt++;
        else if (f.isDone()) {
          f.get();

          doneCnt++;

          activeFuts0.set(i, null);
        } else break;
      }

      if (doneCnt == activeFuts0.size()) return;
    }
  }
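/*
 * Illustration (not from the original source): the completion pass doFlush() runs above - count
 * completed futures, null out their slots so later passes skip them, and stop at the first future
 * that is still running. java.util.concurrent futures stand in for GridFuture here.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;

class FutureScanDemo {
  public static void main(String[] args) throws Exception {
    List<Future<?>> futs = new ArrayList<>();

    futs.add(CompletableFuture.completedFuture("a"));
    futs.add(CompletableFuture.completedFuture("b"));
    futs.add(new CompletableFuture<String>()); // Still pending.

    int doneCnt = 0;

    for (int i = 0; i < futs.size(); i++) {
      Future<?> f = futs.get(i);

      if (f == null) doneCnt++;
      else if (f.isDone()) {
        f.get(); // Propagate any failure.

        doneCnt++;

        futs.set(i, null);
      } else break;
    }

    System.out.println("done=" + doneCnt + " of " + futs.size()); // done=2 of 3
  }
}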
Example #9
    /** {@inheritDoc} */
    @Override
    public String reduce(List<GridComputeJobResult> results) throws GridException {
      assert results.size() == 1;

      return results.get(0).getData();
    }
  /**
   * This method is called to map or split grid task into multiple grid jobs. This is the first
   * method that gets called when task execution starts.
   *
   * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
   *     passed into {@code Grid#execute(...)} methods.
   * @param subgrid Nodes available for this task execution. Note that the order of nodes is
   *     guaranteed to be randomized by the container. This ensures that every time you simply
   *     iterate through the grid nodes, the order of nodes will be random, which over time should
   *     result in all nodes being used equally.
   * @return Map of grid jobs assigned to subgrid nodes. Unless {@link
   *     GridComputeTaskContinuousMapper} is injected into the task, an exception will be thrown if
   *     {@code null} or an empty map is returned.
   * @throws GridException If mapping could not complete successfully. This exception will be thrown
   *     out of {@link GridComputeTaskFuture#get()} method.
   */
  @Override
  public Map<? extends GridComputeJob, GridNode> map(
      List<GridNode> subgrid, @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to the preferred node; otherwise, take the first one.
    GridNode targetNode =
        F.find(
            subgrid,
            subgrid.get(0),
            new GridPredicate<GridNode>() {
              @Override
              public boolean apply(GridNode e) {
                return preferredNode.equals(e.id());
              }
            });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
          @GridLoggerResource private GridLogger log;

          @GridInstanceResource private Grid grid;

          @Override
          public Object execute() throws GridException {
            log.info("Going to put data: " + data.size());

            GridCache<Object, Object> cache = grid.cache(cacheName);

            assert cache != null;

            Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

            for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
              T2<Integer, Collection<Integer>> pair = entry.getValue();

              Object affKey = pair.get1();

              // Group lock partition.
              try (GridCacheTx tx =
                  cache.txStartPartition(
                      cache.affinity().partition(affKey),
                      optimistic ? OPTIMISTIC : PESSIMISTIC,
                      REPEATABLE_READ,
                      0,
                      pair.get2().size())) {
                for (Integer val : pair.get2()) cache.put(val, val);

                tx.commit();
              }
            }

            log.info("Finished put data: " + data.size());

            return data;
          }

          /**
           * Groups values by partitions.
           *
           * @param data Data to put.
           * @return Grouped map.
           */
          private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
            GridCache<Object, Object> cache = grid.cache(cacheName);

            Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

            for (Integer val : data) {
              int part = cache.affinity().partition(val);

              T2<Integer, Collection<Integer>> tup = res.get(part);

              if (tup == null) {
                tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                res.put(part, tup);
              }

              tup.get2().add(val);
            }

            return res;
          }
        },
        targetNode);
  }
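/*
 * Illustration (not from the original source): the bucketing step inside groupData() above, with a
 * hypothetical modulo partition function in place of cache.affinity().partition(). The original
 * additionally keeps the first value of each bucket as the partition's affinity key.
 */
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

class GroupByPartitionDemo {
  private static final int PARTS = 4;

  /** Hypothetical stand-in for cache.affinity().partition(val). */
  private static int partition(int val) {
    return val % PARTS;
  }

  public static void main(String[] args) {
    Map<Integer, List<Integer>> byPartition = new HashMap<>();

    // One bucket per partition.
    for (int val : Arrays.asList(1, 2, 5, 6, 9))
      byPartition.computeIfAbsent(partition(val), p -> new LinkedList<>()).add(val);

    System.out.println(byPartition); // e.g. {1=[1, 5, 9], 2=[2, 6]}
  }
}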