/**
   * Tests that {@code getAndIncrement()} produces globally unique values across all grid nodes.
   *
   * @throws Exception If failed.
   */
  public void testGetAndIncrement() throws Exception {
    Collection<Long> res = new HashSet<>();

    String seqName = UUID.randomUUID().toString();

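    // Each node runs a job that calls getAndIncrement() RETRIES times on the same
    // cluster-wide sequence, so no value may ever be observed twice.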
    for (int i = 0; i < GRID_CNT; i++) {
      Set<Long> retVal =
          compute(grid(i).cluster().forLocal()).call(new GetAndIncrementJob(seqName, RETRIES));

      for (Long l : retVal) assert !res.contains(l) : "Value was already used: " + l;

      res.addAll(retVal);
    }

    assert res.size() == GRID_CNT * RETRIES;

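    // The sequence hands out values in reserved batches, so gaps may appear where a
    // node's reserved range went partially unused; no gap may exceed BATCH_SIZE + 1.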
    int gapSize = 0;

    for (long i = 0; i < GRID_CNT * RETRIES; i++) {
      if (!res.contains(i)) gapSize++;
      else gapSize = 0;

      assert gapSize <= BATCH_SIZE + 1
          : "Gap ending at id " + i + " is " + gapSize + ", exceeding batch size + 1: "
              + (BATCH_SIZE + 1);
    }
  }
  /** {@inheritDoc} */
  @Override
  public boolean equals(Object obj) {
    if (obj == this) return true;

    // Guard against null and foreign types before casting.
    if (!(obj instanceof CancelMessageId)) return false;

    CancelMessageId other = (CancelMessageId) obj;

    return reqId == other.reqId && nodeId.equals(other.nodeId);
  }
  /**
   * Tests that an atomic sequence handle survives marshalling to a remote node.
   *
   * @throws Exception If failed.
   */
  public void testMarshalling() throws Exception {
    String seqName = UUID.randomUUID().toString();

    final IgniteAtomicSequence seq = grid(0).atomicSequence(seqName, 0, true);

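    // Send a closure that captures the sequence handle to another node, exercising
    // marshalling and unmarshalling of the sequence proxy.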
    grid(1)
        .compute()
        .run(
            new CAX() {
              @Override
              public void applyx() {
                assertNotNull(seq);

                for (int i = 0; i < RETRIES; i++) seq.incrementAndGet();
              }
            });
  }
  /** {@inheritDoc} */
  @Override
  public void start() throws IgniteCheckedException {
    IpcSharedMemoryNativeLoader.load(log);

    pid = IpcSharedMemoryUtils.pid();

    if (pid == -1) throw new IpcEndpointBindException("Failed to get PID of the current process.");

    if (size <= 0) throw new IpcEndpointBindException("Space size should be positive: " + size);

    String tokDirPath = this.tokDirPath;

    if (F.isEmpty(tokDirPath)) throw new IpcEndpointBindException("Token directory path is empty.");

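    // Append the local node ID and PID so that concurrent nodes on the same host
    // get distinct token directories.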
    tokDirPath = tokDirPath + '/' + locNodeId + '-' + pid;

    tokDir = U.resolveWorkDirectory(tokDirPath, false);

    if (port <= 0 || port >= 0xffff)
      throw new IpcEndpointBindException("Port value is illegal: " + port);

    try {
      srvSock = new ServerSocket();

      // Always bind to loopback.
      srvSock.bind(new InetSocketAddress("127.0.0.1", port));
    } catch (IOException e) {
      // Although the no-arg ServerSocket constructor never throws, close the socket just in case.
      U.closeQuiet(srvSock);

      throw new IpcEndpointBindException(
          "Failed to bind shared memory IPC endpoint (is port already in use?): " + port, e);
    }

    gcWorker = new GcWorker(gridName, "ipc-shmem-gc", log);

    new IgniteThread(gcWorker).start();

    if (log.isInfoEnabled())
      log.info(
          "IPC shared memory server endpoint started [port="
              + port
              + ", tokDir="
              + tokDir.getAbsolutePath()
              + ']');
  }
  /** {@inheritDoc} */
  @Override
  protected Object executeJob(int gridSize, String type) {
    log.info(">>> Starting new grid node [currGridSize=" + gridSize + ", arg=" + type + "]");

    if (type == null) throw new IllegalArgumentException("Node type to start should be specified.");

    IgniteConfiguration cfg = getConfig(type);

    // Generate a grid name that is unique within this VM.
    String gridName = cfg.getGridName() + " (" + UUID.randomUUID() + ")";

    // Update grid name (required to be unique).
    cfg.setGridName(gridName);

    // Start new node in current VM.
    Ignite g = G.start(cfg);

    log.info(
        ">>> Grid started [nodeId=" + g.cluster().localNode().id() + ", name='" + g.name() + "']");

    return true;
  }
  /** {@inheritDoc} */
  @Override
  public ClusterStartNodeResult call() {
    JSch ssh = new JSch();

    Session ses = null;

    try {
      if (spec.key() != null) ssh.addIdentity(spec.key().getAbsolutePath());

      ses = ssh.getSession(spec.username(), spec.host(), spec.port());

      if (spec.password() != null) ses.setPassword(spec.password());

      ses.setConfig("StrictHostKeyChecking", "no");

      ses.connect(timeout);

      boolean win = isWindows(ses);

      char separator = win ? '\\' : '/';

      spec.fixPaths(separator);

      String igniteHome = spec.igniteHome();

      if (igniteHome == null) igniteHome = win ? DFLT_IGNITE_HOME_WIN : DFLT_IGNITE_HOME_LINUX;

      String script = spec.script();

      if (script == null) script = DFLT_SCRIPT_LINUX;

      String cfg = spec.configuration();

      if (cfg == null) cfg = "";

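      // Give each start attempt a unique, timestamped log file name so concurrent
      // starts on the same host never write to the same file.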
      String startNodeCmd;
      String scriptOutputFileName =
          FILE_NAME_DATE_FORMAT.format(new Date())
              + '-'
              + UUID.randomUUID().toString().substring(0, 8)
              + ".log";

      if (win)
        throw new UnsupportedOperationException(
            "Apache Ignite cannot be auto-started on Windows from IgniteCluster.startNodes(…) API.");
      else { // Assume Unix.
        int spaceIdx = script.indexOf(' ');

        String scriptPath = spaceIdx > -1 ? script.substring(0, spaceIdx) : script;
        String scriptArgs = spaceIdx > -1 ? script.substring(spaceIdx + 1) : "";
        String rmtLogArgs = buildRemoteLogArguments(spec.username(), spec.host());
        String tmpDir = env(ses, "$TMPDIR", "/tmp/");
        String scriptOutputDir = tmpDir + "ignite-startNodes";

        shell(ses, "mkdir " + scriptOutputDir);

        // Mac OS does not expand '~' inside double quotes, so resolve the home path on the remote system.
        if (igniteHome.startsWith("~")) {
          String homeDir = env(ses, "$HOME", "~");

          igniteHome = igniteHome.replaceFirst("~", homeDir);
        }

        // Console output is consumed, so started nodes must use Ignite file appenders for logs.
        startNodeCmd =
            new SB()
                .a("nohup ")
                .a("\"")
                .a(igniteHome)
                .a('/')
                .a(scriptPath)
                .a("\"")
                .a(" ")
                .a(scriptArgs)
                .a(!cfg.isEmpty() ? " \"" : "")
                .a(cfg)
                .a(!cfg.isEmpty() ? "\"" : "")
                .a(rmtLogArgs)
                .a(" > ")
                .a(scriptOutputDir)
                .a("/")
                .a(scriptOutputFileName)
                .a(" 2>&1 &")
                .toString();
      }

      info("Starting remote node with SSH command: " + startNodeCmd, spec.logger(), log);

      shell(ses, startNodeCmd);

      return new ClusterStartNodeResultImpl(spec.host(), true, null);
    } catch (IgniteInterruptedCheckedException e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, e.getMessage());
    } catch (Exception e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, X.getFullStackTrace(e));
    } finally {
      if (ses != null && ses.isConnected()) ses.disconnect();
    }
  }
  /** @throws Exception If failed. */
  public void testCompact() throws Exception {
    File file = new File(UUID.randomUUID().toString());

    X.println("file: " + file.getPath());

    FileSwapSpaceSpi.SwapFile f = new FileSwapSpaceSpi.SwapFile(file, 8);

    Random rnd = new Random();

    ArrayList<FileSwapSpaceSpi.SwapValue> arr = new ArrayList<>();

    int size = 0;

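    // Write 100 batches of random values; after every batch the swap file length
    // must match the total number of bytes written so far.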
    for (int a = 0; a < 100; a++) {
      FileSwapSpaceSpi.SwapValue[] vals = new FileSwapSpaceSpi.SwapValue[1 + rnd.nextInt(10)];

      int size0 = 0;

      for (int i = 0; i < vals.length; i++) {
        byte[] bytes = new byte[1 + rnd.nextInt(49)];

        rnd.nextBytes(bytes);

        size0 += bytes.length;

        vals[i] = new FileSwapSpaceSpi.SwapValue(bytes);

        arr.add(vals[i]);
      }

      f.write(new FileSwapSpaceSpi.SwapValues(vals, size0), 1);

      size += size0;

      assertEquals(size, f.length());
      assertEquals(size, file.length());
    }

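    // Values are assigned sequential indexes (starting at 1) in write order.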
    int i = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) assertEquals(++i, val.idx());

    i = 0;

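    // Remove half of the values at random positions; every removal must succeed.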
    for (int cnt = arr.size() / 2; i < cnt; i++) {
      FileSwapSpaceSpi.SwapValue v = arr.remove(rnd.nextInt(arr.size()));

      assertTrue(f.tryRemove(v.idx(), v));
    }

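    // Hash the surviving values while the original file is still readable, so the
    // content can be compared after compaction.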
    int hash0 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash0 += Arrays.hashCode(val.readValue(f.readCh));

    ArrayList<T2<ByteBuffer, ArrayDeque<FileSwapSpaceSpi.SwapValue>>> bufs = new ArrayList<>();

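    // Drain the file through compact() until it returns null, collecting each
    // relocated buffer together with its values.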
    for (; ; ) {
      ArrayDeque<FileSwapSpaceSpi.SwapValue> que = new ArrayDeque<>();

      ByteBuffer buf = f.compact(que, 1024);

      if (buf == null) break;

      bufs.add(new T2<>(buf, que));
    }

    f.delete();

    int hash1 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash1 += Arrays.hashCode(val.value(null));

    assertEquals(hash0, hash1);

    File file0 = new File(UUID.randomUUID().toString());

    FileSwapSpaceSpi.SwapFile f0 = new FileSwapSpaceSpi.SwapFile(file0, 8);

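    // Replay the compacted buffers into a fresh swap file; its contents must hash
    // to the same value as before compaction.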
    for (T2<ByteBuffer, ArrayDeque<FileSwapSpaceSpi.SwapValue>> t : bufs)
      f0.write(t.get2(), t.get1(), 1);

    int hash2 = 0;

    for (FileSwapSpaceSpi.SwapValue val : arr) hash2 += Arrays.hashCode(val.readValue(f0.readCh));

    assertEquals(hash1, hash2);
  }
  /**
   * Updates value for single partition.
   *
   * @param p Partition.
   * @param nodeId Node ID.
   * @param state State.
   * @param updateSeq Update sequence.
   */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // In case a node joins, get the topology as of the time the node joined.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node has become the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
      long seq = node2part.updateSequence();

      if (seq != updateSeq) {
        if (seq > updateSeq) {
          if (this.updateSeq.get() < seq) {
            // Update global counter if necessary.
            boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

            assert b
                : "Invalid update sequence [updateSeq="
                    + updateSeq
                    + ", seq="
                    + seq
                    + ", curUpdateSeq="
                    + this.updateSeq.get()
                    + ", node2part="
                    + node2part.toFullString()
                    + ']';

            updateSeq = seq + 1;
          } else updateSeq = seq;
        }

        node2part.updateSequence(updateSeq);
      }
    }

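    // Update this node's entry in the full partition map, creating it on first use.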
    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
      node2part.put(
          nodeId,
          map =
              new GridDhtPartitionMap(
                  nodeId,
                  updateSeq,
                  Collections.<Integer, GridDhtPartitionState>emptyMap(),
                  false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null) part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
  }
  /**
   * Checks whether any of the local partitions need to be evicted.
   *
   * @param updateSeq Update sequence.
   * @return {@code True} if at least one local partition was scheduled for eviction.
   */
  private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
      GridDhtPartitionState state = part.state();

      if (state.active()) {
        int p = part.id();

        List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

        if (!affNodes.contains(cctx.localNode())) {
          Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

          // If all affinity nodes are owners, then evict partition from local node.
          if (nodeIds.containsAll(F.nodeIds(affNodes))) {
            part.rent(false);

            updateLocal(part.id(), locId, part.state(), updateSeq);

            changed = true;

            if (log.isDebugEnabled())
              log.debug("Evicted local partition (all affinity nodes are owners): " + part);
          } else {
            int ownerCnt = nodeIds.size();
            int affCnt = affNodes.size();

            if (ownerCnt > affCnt) {
              List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

              // Sort by node order, ascending (oldest nodes first).
              Collections.sort(sorted, CU.nodeComparator(true));

              int diff = sorted.size() - affCnt;

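              // With more owners than affinity nodes, the 'diff' oldest owners must
              // give up their extra copies; evict only if this node is one of them.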
              for (int i = 0; i < diff; i++) {
                ClusterNode n = sorted.get(i);

                if (locId.equals(n.id())) {
                  part.rent(false);

                  updateLocal(part.id(), locId, part.state(), updateSeq);

                  changed = true;

                  if (log.isDebugEnabled())
                    log.debug(
                        "Evicted local partition (this node is oldest non-affinity node): " + part);

                  break;
                }
              }
            }
          }
        }
      }
    }

    return changed;
  }
  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    return 31 * ((int) (reqId ^ (reqId >>> 32))) + nodeId.hashCode();
  }