/**
   * Tests that {@code getAndIncrement()} yields unique values across all grid nodes and that gaps
   * stay within the reservation batch size.
   *
   * @throws Exception If failed.
   */
  public void testGetAndIncrement() throws Exception {
    Collection<Long> res = new HashSet<>();

    String seqName = UUID.randomUUID().toString();

    for (int i = 0; i < GRID_CNT; i++) {
      Set<Long> retVal =
          compute(grid(i).cluster().forLocal()).call(new GetAndIncrementJob(seqName, RETRIES));

      for (Long l : retVal) assert !res.contains(l) : "Value was already used: " + l;

      res.addAll(retVal);
    }

    assert res.size() == GRID_CNT * RETRIES;

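    // Sequence values are reserved in batches, so unused reservations may leave gaps;
    // check that no gap exceeds BATCH_SIZE + 1.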
    int gapSize = 0;

    for (long i = 0; i < GRID_CNT * RETRIES; i++) {
      if (!res.contains(i)) gapSize++;
      else gapSize = 0;

      assert gapSize <= BATCH_SIZE + 1
          : "Gap above id " + i + " is " + gapSize + ", which exceeds allowed maximum "
              + (BATCH_SIZE + 1);
    }
  }
  /**
   * Tests that an atomic sequence handle marshals correctly and can be used from a remote node.
   *
   * @throws Exception If failed.
   */
  public void testMarshalling() throws Exception {
    String seqName = UUID.randomUUID().toString();

    final IgniteAtomicSequence seq = grid(0).atomicSequence(seqName, 0, true);

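    // The sequence handle is captured by the closure below and must be marshalled to the remote node.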
    grid(1)
        .compute()
        .run(
            new CAX() {
              @Override
              public void applyx() {
                assertNotNull(seq);

                for (int i = 0; i < RETRIES; i++) seq.incrementAndGet();
              }
            });
  }
  /** {@inheritDoc} */
  @Override
  protected Object executeJob(int gridSize, String type) {
    log.info(">>> Starting new grid node [currGridSize=" + gridSize + ", arg=" + type + "]");

    if (type == null) throw new IllegalArgumentException("Node type to start should be specified.");

    IgniteConfiguration cfg = getConfig(type);

    // Generate a grid name unique to this VM.
    String gridName = cfg.getGridName() + " (" + UUID.randomUUID() + ")";

    // Update grid name (required to be unique).
    cfg.setGridName(gridName);

    // Start new node in current VM.
    Ignite g = G.start(cfg);

    log.info(
        ">>> Grid started [nodeId=" + g.cluster().localNode().id() + ", name='" + g.name() + "']");

    return true;
  }
  /** {@inheritDoc} */
  @Override
  public ClusterStartNodeResult call() {
    JSch ssh = new JSch();

    Session ses = null;

    try {
      if (spec.key() != null) ssh.addIdentity(spec.key().getAbsolutePath());

      ses = ssh.getSession(spec.username(), spec.host(), spec.port());

      if (spec.password() != null) ses.setPassword(spec.password());

      ses.setConfig("StrictHostKeyChecking", "no");

      ses.connect(timeout);

      boolean win = isWindows(ses);

      char separator = win ? '\\' : '/';

      spec.fixPaths(separator);

      String igniteHome = spec.igniteHome();

      if (igniteHome == null) igniteHome = win ? DFLT_IGNITE_HOME_WIN : DFLT_IGNITE_HOME_LINUX;

      String script = spec.script();

      if (script == null) script = DFLT_SCRIPT_LINUX;

      String cfg = spec.configuration();

      if (cfg == null) cfg = "";

      String startNodeCmd;
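      // Unique log file name for the remote node: timestamp plus a random suffix.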
      String scriptOutputFileName =
          FILE_NAME_DATE_FORMAT.format(new Date())
              + '-'
              + UUID.randomUUID().toString().substring(0, 8)
              + ".log";

      if (win)
        throw new UnsupportedOperationException(
            "Apache Ignite cannot be auto-started on Windows from IgniteCluster.startNodes(…) API.");
      else { // Assume Unix.
        int spaceIdx = script.indexOf(' ');

        String scriptPath = spaceIdx > -1 ? script.substring(0, spaceIdx) : script;
        String scriptArgs = spaceIdx > -1 ? script.substring(spaceIdx + 1) : "";
        String rmtLogArgs = buildRemoteLogArguments(spec.username(), spec.host());
        String tmpDir = env(ses, "$TMPDIR", "/tmp/");
        String scriptOutputDir = tmpDir + "ignite-startNodes";

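        // Create the directory that will hold the startup script output on the remote host.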
        shell(ses, "mkdir " + scriptOutputDir);

        // Mac OS doesn't expand ~ inside double quotes, so resolve the home path from the remote system.
        if (igniteHome.startsWith("~")) {
          String homeDir = env(ses, "$HOME", "~");

          igniteHome = igniteHome.replaceFirst("~", homeDir);
        }

        startNodeCmd =
            new SB()
                // Console output is consumed; started nodes must use Ignite file appenders for logging.
                .a("nohup ")
                .a("\"")
                .a(igniteHome)
                .a('/')
                .a(scriptPath)
                .a("\"")
                .a(" ")
                .a(scriptArgs)
                .a(!cfg.isEmpty() ? " \"" : "")
                .a(cfg)
                .a(!cfg.isEmpty() ? "\"" : "")
                .a(rmtLogArgs)
                .a(" > ")
                .a(scriptOutputDir)
                .a("/")
                .a(scriptOutputFileName)
                .a(" 2>& 1 &")
                .toString();
      }

      info("Starting remote node with SSH command: " + startNodeCmd, spec.logger(), log);

      shell(ses, startNodeCmd);

      return new ClusterStartNodeResultImpl(spec.host(), true, null);
    } catch (IgniteInterruptedCheckedException e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, e.getMessage());
    } catch (Exception e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, X.getFullStackTrace(e));
    } finally {
      if (ses != null && ses.isConnected()) ses.disconnect();
    }
  }
  /**
   * Tests that swap file compaction preserves stored values.
   *
   * @throws Exception If failed.
   */
  public void testCompact() throws Exception {
    File file = new File(UUID.randomUUID().toString());

    X.println("file: " + file.getPath());

    FileSwapSpaceSpi.SwapFile f = new FileSwapSpaceSpi.SwapFile(file, 8);

    Random rnd = new Random();

    ArrayList<FileSwapSpaceSpi.SwapValue> arr = new ArrayList<>();

    int size = 0;

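    // Write batches of random values and check the swap file grows by exactly the number of bytes written.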
    for (int a = 0; a < 100; a++) {
      FileSwapSpaceSpi.SwapValue[] vals = new FileSwapSpaceSpi.SwapValue[1 + rnd.nextInt(10)];

      int size0 = 0;

      for (int i = 0; i < vals.length; i++) {
        byte[] bytes = new byte[1 + rnd.nextInt(49)];

        rnd.nextBytes(bytes);

        size0 += bytes.length;

        vals[i] = new FileSwapSpaceSpi.SwapValue(bytes);

        arr.add(vals[i]);
      }

      f.write(new FileSwapSpaceSpi.SwapValues(vals, size0), 1);

      size += size0;

      assertEquals(f.length(), size);
      assertEquals(file.length(), size);
    }

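    // Written values are assigned sequential indexes starting from 1.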
    int i = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) assertEquals(val.idx(), ++i);

    i = 0;

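    // Randomly remove half of the values to create holes for compaction to reclaim.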
    for (int cnt = arr.size() / 2; i < cnt; i++) {
      FileSwapSpaceSpi.SwapValue v = arr.remove(rnd.nextInt(arr.size()));

      assertTrue(f.tryRemove(v.idx(), v));
    }

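    // Remember the hash of the remaining values so data integrity can be verified after compaction.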
    int hash0 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash0 += Arrays.hashCode(val.readValue(f.readCh));

    ArrayList<T2<ByteBuffer, ArrayDeque<FileSwapSpaceSpi.SwapValue>>> bufs = new ArrayList<>();

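    // Compact the file in 1024-byte chunks; each pass returns a buffer of relocated values until
    // there is nothing left to move.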
    for (; ; ) {
      ArrayDeque<FileSwapSpaceSpi.SwapValue> que = new ArrayDeque<>();

      ByteBuffer buf = f.compact(que, 1024);

      if (buf == null) break;

      bufs.add(new T2<>(buf, que));
    }

    f.delete();

    int hash1 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash1 += Arrays.hashCode(val.value(null));

    assertEquals(hash0, hash1);

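    // Replay the compacted buffers into a fresh swap file and verify the values survive the round trip.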
    File file0 = new File(UUID.randomUUID().toString());

    FileSwapSpaceSpi.SwapFile f0 = new FileSwapSpaceSpi.SwapFile(file0, 8);

    for (T2<ByteBuffer, ArrayDeque<FileSwapSpaceSpi.SwapValue>> t : bufs)
      f0.write(t.get2(), t.get1(), 1);

    int hash2 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash2 += Arrays.hashCode(val.readValue(f0.readCh));

    assertEquals(hash2, hash1);
  }