/**
   * Tests that sequence getAndIncrement() returns unique, non-overlapping values on all grid nodes.
   *
   * @throws Exception If failed.
   */
  public void testGetAndIncrement() throws Exception {
    Collection<Long> res = new HashSet<>();

    String seqName = UUID.randomUUID().toString();

    for (int i = 0; i < GRID_CNT; i++) {
      Set<Long> retVal =
          compute(grid(i).cluster().forLocal()).call(new GetAndIncrementJob(seqName, RETRIES));

      for (Long l : retVal) assert !res.contains(l) : "Value was already used: " + l;

      res.addAll(retVal);
    }

    assert res.size() == GRID_CNT * RETRIES;

    int gapSize = 0;

    for (long i = 0; i < GRID_CNT * RETRIES; i++) {
      if (!res.contains(i)) gapSize++;
      else gapSize = 0;

      assert gapSize <= BATCH_SIZE + 1
          : "Gap above id " + i + " is " + gapSize + ", which exceeds batch size + 1 (" + (BATCH_SIZE + 1) + ")";
    }
  }
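The GetAndIncrementJob callable used above is not shown on this page. A minimal sketch of what such a callable could look like, assuming it draws its values from an IgniteAtomicSequence created on demand (class layout and field names here are illustrative, not the original implementation):

  /**
   * Illustrative callable (sketch only): draws {@code retries} values from
   * the named atomic sequence and returns them as a set.
   */
  private static class GetAndIncrementJob implements IgniteCallable<Set<Long>> {
    /** Auto-injected Ignite instance. */
    @IgniteInstanceResource
    private Ignite ignite;

    /** Sequence name. */
    private final String seqName;

    /** Number of increments to perform. */
    private final int retries;

    /**
     * @param seqName Sequence name.
     * @param retries Number of increments to perform.
     */
    GetAndIncrementJob(String seqName, int retries) {
      this.seqName = seqName;
      this.retries = retries;
    }

    /** {@inheritDoc} */
    @Override public Set<Long> call() {
      // Create the sequence on first access with initial value 0.
      IgniteAtomicSequence seq = ignite.atomicSequence(seqName, 0, true);

      Set<Long> vals = new HashSet<>();

      for (int i = 0; i < retries; i++)
        vals.add(seq.getAndIncrement());

      return vals;
    }
  }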
Code Example #2
  /**
   * Change topology.
   *
   * @param parent Grid to execute tasks on.
   * @param add Number of nodes to start.
   * @param rmv Number of nodes to stop.
   * @param type Type of nodes to manipulate.
   */
  private static void changeTopology(Ignite parent, int add, int rmv, String type) {
    Collection<ComputeTaskFuture<?>> tasks = new ArrayList<>();

    IgniteCompute comp = parent.compute().withAsync();

    // Start nodes in parallel.
    while (add-- > 0) {
      comp.execute(ClientStartNodeTask.class, type);

      tasks.add(comp.future());
    }

    for (ComputeTaskFuture<?> task : tasks) task.get();

    // Stop nodes in sequence.
    while (rmv-- > 0) parent.compute().execute(ClientStopNodeTask.class, type);

    // Wait for node stops.
    // U.sleep(1000);

    Collection<String> gridNames = new ArrayList<>();

    for (Ignite g : G.allGrids()) gridNames.add(g.name());

    parent.log().info(">>> Available grids: " + gridNames);
  }
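Note that IgniteCompute.withAsync() used above is the legacy async facade. On Ignite 2.x the same start-nodes-in-parallel step could be written with executeAsync(), roughly as sketched below (assuming ClientStartNodeTask takes the node type string as its task argument, as in the example above):

    // Sketch only: start nodes in parallel via executeAsync() instead of withAsync().
    Collection<ComputeTaskFuture<?>> tasks = new ArrayList<>();

    while (add-- > 0)
      tasks.add(parent.compute().executeAsync(ClientStartNodeTask.class, type));

    // Wait for all start tasks to complete.
    for (ComputeTaskFuture<?> task : tasks)
      task.get();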
Code Example #3
    /** {@inheritDoc} */
    @Override
    public Collection<? extends ComputeJob> split(int gridSize, Serializable arg) {
      if (log.isInfoEnabled())
        log.info("Splitting task [task=" + this + ", gridSize=" + gridSize + ", arg=" + arg + ']');

      Collection<GridCancelTestJob> jobs = new ArrayList<>(SPLIT_COUNT);

      for (int i = 0; i < SPLIT_COUNT; i++) jobs.add(new GridCancelTestJob());

      return jobs;
    }
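GridCancelTestJob itself is not part of this snippet. A hypothetical shape for such a job, consistent with how split() uses it above, is a ComputeJobAdapter that blocks until it is cancelled, for example:

    /** Hypothetical job (not the original): blocks in execute() until cancelled. */
    private static class GridCancelTestJob extends ComputeJobAdapter {
      /** {@inheritDoc} */
      @Override public Object execute() {
        try {
          // Block until interrupted; a real test job typically coordinates
          // with cancel() via a latch or a volatile flag.
          Thread.sleep(Long.MAX_VALUE);
        }
        catch (InterruptedException ignored) {
          // Restore the interrupt flag and finish.
          Thread.currentThread().interrupt();
        }

        return null;
      }
    }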
Code Example #4
  /** {@inheritDoc} */
  @SuppressWarnings("ErrorNotRethrown")
  @Override
  public IpcEndpoint accept() throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
      Socket sock = null;

      boolean accepted = false;

      try {
        sock = srvSock.accept();

        accepted = true;

        ObjectInputStream in = new ObjectInputStream(sock.getInputStream());

        ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());

        IpcSharedMemorySpace inSpace = null;

        IpcSharedMemorySpace outSpace = null;

        boolean err = true;

        try {
          IpcSharedMemoryInitRequest req = (IpcSharedMemoryInitRequest) in.readObject();

          if (log.isDebugEnabled()) log.debug("Processing request: " + req);

          IgnitePair<String> p = inOutToken(req.pid(), size);

          String file1 = p.get1();
          String file2 = p.get2();

          assert file1 != null;
          assert file2 != null;

          // Create tokens.
          new File(file1).createNewFile();
          new File(file2).createNewFile();

          if (log.isDebugEnabled()) log.debug("Created token files: " + p);

          inSpace = new IpcSharedMemorySpace(file1, req.pid(), pid, size, true, log);

          outSpace = new IpcSharedMemorySpace(file2, pid, req.pid(), size, false, log);

          IpcSharedMemoryClientEndpoint ret =
              new IpcSharedMemoryClientEndpoint(inSpace, outSpace, log);

          out.writeObject(
              new IpcSharedMemoryInitResponse(
                  file2, outSpace.sharedMemoryId(), file1, inSpace.sharedMemoryId(), pid, size));

          err = !in.readBoolean();

          endpoints.add(ret);

          return ret;
        } catch (UnsatisfiedLinkError e) {
          throw IpcSharedMemoryUtils.linkError(e);
        } catch (IOException e) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to process incoming connection "
                    + "(was connection closed by another party):"
                    + e.getMessage());
        } catch (ClassNotFoundException e) {
          U.error(log, "Failed to process incoming connection.", e);
        } catch (ClassCastException e) {
          String msg =
              "Failed to process incoming connection (most probably, shared memory "
                  + "rest endpoint has been configured by mistake).";

          LT.warn(log, null, msg);

          sendErrorResponse(out, e);
        } catch (IpcOutOfSystemResourcesException e) {
          if (!omitOutOfResourcesWarn) LT.warn(log, null, OUT_OF_RESOURCES_MSG);

          sendErrorResponse(out, e);
        } catch (IgniteCheckedException e) {
          LT.error(log, e, "Failed to process incoming shared memory connection.");

          sendErrorResponse(out, e);
        } finally {
          // An error occurred (exception or negative handshake response), need to free system resources.
          if (err) {
            if (inSpace != null) inSpace.forceClose();

            // Safety.
            if (outSpace != null) outSpace.forceClose();
          }
        }
      } catch (IOException e) {
        if (!Thread.currentThread().isInterrupted() && !accepted)
          throw new IgniteCheckedException("Failed to accept incoming connection.", e);

        if (!closed)
          LT.error(
              log, null, "Failed to process incoming shared memory connection: " + e.getMessage());
      } finally {
        U.closeQuiet(sock);
      }
    } // while

    throw new IgniteInterruptedCheckedException("Socket accept was interrupted.");
  }
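As a usage illustration only (the srvEndpoint and handleClient names below are placeholders, not part of the snippet above), accept() is typically driven from a dedicated acceptor thread that loops until the endpoint is shut down or the thread is interrupted:

  // Illustrative acceptor loop; srvEndpoint and handleClient(...) are hypothetical.
  Thread acceptor = new Thread(() -> {
    try {
      while (!Thread.currentThread().isInterrupted()) {
        IpcEndpoint client = srvEndpoint.accept(); // Blocks until a client connects.

        handleClient(client); // Hypothetical per-connection handler.
      }
    }
    catch (IgniteCheckedException e) {
      // Accept was interrupted or failed; the acceptor simply exits.
    }
  });

  acceptor.start();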