/**
   * @param meta Job metadata.
   * @return {@code true} if the local node is participating in job execution.
   */
  public boolean isParticipating(HadoopJobMetadata meta) {
    UUID locNodeId = localNodeId();

    if (locNodeId.equals(meta.submitNodeId())) return true;

    HadoopMapReducePlan plan = meta.mapReducePlan();

    return plan.mapperNodeIds().contains(locNodeId)
        || plan.reducerNodeIds().contains(locNodeId)
        || jobUpdateLeader();
  }
  /** {@inheritDoc} */
  @SuppressWarnings({"unchecked"})
  @Override
  public boolean equals(Object obj) {
    if (obj == this) return true;

    CancelMessageId other = (CancelMessageId) obj;

    return reqId == other.reqId && nodeId.equals(other.nodeId);
  }
  /** {@inheritDoc} */
  @Override
  public ClusterStartNodeResult call() {
    JSch ssh = new JSch();

    Session ses = null;

    try {
      if (spec.key() != null) ssh.addIdentity(spec.key().getAbsolutePath());

      ses = ssh.getSession(spec.username(), spec.host(), spec.port());

      if (spec.password() != null) ses.setPassword(spec.password());

      ses.setConfig("StrictHostKeyChecking", "no");

      ses.connect(timeout);

      boolean win = isWindows(ses);

      char separator = win ? '\\' : '/';

      spec.fixPaths(separator);

      String igniteHome = spec.igniteHome();

      if (igniteHome == null) igniteHome = win ? DFLT_IGNITE_HOME_WIN : DFLT_IGNITE_HOME_LINUX;

      String script = spec.script();

      if (script == null) script = DFLT_SCRIPT_LINUX;

      String cfg = spec.configuration();

      if (cfg == null) cfg = "";

      String startNodeCmd;
      String scriptOutputFileName =
          FILE_NAME_DATE_FORMAT.format(new Date())
              + '-'
              + UUID.randomUUID().toString().substring(0, 8)
              + ".log";

      if (win)
        throw new UnsupportedOperationException(
            "Apache Ignite cannot be auto-started on Windows from IgniteCluster.startNodes(…) API.");
      else { // Assume Unix.
        int spaceIdx = script.indexOf(' ');

        String scriptPath = spaceIdx > -1 ? script.substring(0, spaceIdx) : script;
        String scriptArgs = spaceIdx > -1 ? script.substring(spaceIdx + 1) : "";
        String rmtLogArgs = buildRemoteLogArguments(spec.username(), spec.host());
        String tmpDir = env(ses, "$TMPDIR", "/tmp/");
        String scriptOutputDir = tmpDir + "ignite-startNodes";

        shell(ses, "mkdir " + scriptOutputDir);

        // Mac OS does not support ~ inside double quotes. Try to get the home path from the remote system.
        if (igniteHome.startsWith("~")) {
          String homeDir = env(ses, "$HOME", "~");

          igniteHome = igniteHome.replaceFirst("~", homeDir);
        }

        // Console output is consumed, so started nodes must use Ignite file appenders for logging.
        startNodeCmd =
            new SB()
                .a("nohup ")
                .a("\"")
                .a(igniteHome)
                .a('/')
                .a(scriptPath)
                .a("\"")
                .a(" ")
                .a(scriptArgs)
                .a(!cfg.isEmpty() ? " \"" : "")
                .a(cfg)
                .a(!cfg.isEmpty() ? "\"" : "")
                .a(rmtLogArgs)
                .a(" > ")
                .a(scriptOutputDir)
                .a("/")
                .a(scriptOutputFileName)
                .a(" 2>& 1 &")
                .toString();
      }

      info("Starting remote node with SSH command: " + startNodeCmd, spec.logger(), log);

      shell(ses, startNodeCmd);

      return new ClusterStartNodeResultImpl(spec.host(), true, null);
    } catch (IgniteInterruptedCheckedException e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, e.getMessage());
    } catch (Exception e) {
      return new ClusterStartNodeResultImpl(spec.host(), false, X.getFullStackTrace(e));
    } finally {
      if (ses != null && ses.isConnected()) ses.disconnect();
    }
  }
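  /*
   * Illustrative sketch (not part of the original class): roughly what the assembled
   * Unix start command looks like for example inputs. Uses java.lang.StringBuilder
   * instead of Ignite's internal SB; the Ignite home, script, config and log paths
   * below are hypothetical example values.
   */
  private static String exampleStartNodeCmd() {
    String igniteHome = "/opt/ignite"; // Hypothetical IGNITE_HOME.
    String scriptPath = "bin/ignite.sh"; // Default Linux start script.
    String cfg = "config/example-cache.xml"; // Hypothetical configuration path.
    String outFile = "/tmp/ignite-startNodes/example.log"; // Hypothetical output file.

    // Mirrors the command built above: the node is started with nohup and both stdout
    // and stderr are redirected to a file, so the SSH session can disconnect afterwards.
    return new StringBuilder()
        .append("nohup ")
        .append('"').append(igniteHome).append('/').append(scriptPath).append('"')
        .append(" \"").append(cfg).append('"')
        .append(" > ").append(outFile)
        .append(" 2>&1 &")
        .toString();
  }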
  /**
   * Updates the state of a single partition for the local node.
   *
   * @param p Partition.
   * @param nodeId Node ID.
   * @param state State.
   * @param updateSeq Update sequence.
   */
  @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
  private void updateLocal(int p, UUID nodeId, GridDhtPartitionState state, long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();
    assert nodeId.equals(cctx.nodeId());

    // If a node is joining, use the topology as of the moment that node joined to find the oldest node.
    ClusterNode oldest = CU.oldestAliveCacheServerNode(cctx.shared(), topVer);

    assert oldest != null;

    // If this node became the oldest node.
    if (oldest.id().equals(cctx.nodeId())) {
      long seq = node2part.updateSequence();

      if (seq != updateSeq) {
        if (seq > updateSeq) {
          if (this.updateSeq.get() < seq) {
            // Update global counter if necessary.
            boolean b = this.updateSeq.compareAndSet(this.updateSeq.get(), seq + 1);

            assert b
                : "Invalid update sequence [updateSeq="
                    + updateSeq
                    + ", seq="
                    + seq
                    + ", curUpdateSeq="
                    + this.updateSeq.get()
                    + ", node2part="
                    + node2part.toFullString()
                    + ']';

            updateSeq = seq + 1;
          } else updateSeq = seq;
        }

        node2part.updateSequence(updateSeq);
      }
    }

    GridDhtPartitionMap map = node2part.get(nodeId);

    if (map == null)
      node2part.put(
          nodeId,
          map =
              new GridDhtPartitionMap(
                  nodeId,
                  updateSeq,
                  Collections.<Integer, GridDhtPartitionState>emptyMap(),
                  false));

    map.updateSequence(updateSeq);

    map.put(p, state);

    Set<UUID> ids = part2node.get(p);

    if (ids == null) part2node.put(p, ids = U.newHashSet(3));

    ids.add(nodeId);
  }
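  /*
   * Illustrative sketch (not part of the original class): the same two-map bookkeeping
   * performed by updateLocal(), expressed with plain JDK collections (java.util imports
   * assumed). A forward index maps each node to its per-partition states; a reverse
   * index maps each partition to the set of nodes that reported a state for it. All
   * names here are hypothetical.
   */
  private static void recordPartitionState(
      Map<UUID, Map<Integer, GridDhtPartitionState>> node2Part,
      Map<Integer, Set<UUID>> part2Node,
      UUID nodeId,
      int part,
      GridDhtPartitionState state) {
    // Forward index: per-node partition state map.
    node2Part.computeIfAbsent(nodeId, id -> new HashMap<>()).put(part, state);

    // Reverse index: nodes that have reported this partition.
    part2Node.computeIfAbsent(part, p -> new HashSet<>(3)).add(nodeId);
  }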
  /**
   * Checks if any of the local partitions need to be evicted.
   *
   * @param updateSeq Update sequence.
   * @return {@code true} if any local partition was scheduled for eviction.
   */
  private boolean checkEvictions(long updateSeq) {
    assert lock.isWriteLockedByCurrentThread();

    boolean changed = false;

    UUID locId = cctx.nodeId();

    for (GridDhtLocalPartition part : locParts.values()) {
      GridDhtPartitionState state = part.state();

      if (state.active()) {
        int p = part.id();

        List<ClusterNode> affNodes = cctx.affinity().nodes(p, topVer);

        if (!affNodes.contains(cctx.localNode())) {
          Collection<UUID> nodeIds = F.nodeIds(nodes(p, topVer, OWNING));

          // If all affinity nodes are owners, then evict partition from local node.
          if (nodeIds.containsAll(F.nodeIds(affNodes))) {
            part.rent(false);

            updateLocal(part.id(), locId, part.state(), updateSeq);

            changed = true;

            if (log.isDebugEnabled())
              log.debug("Evicted local partition (all affinity nodes are owners): " + part);
          } else {
            int ownerCnt = nodeIds.size();
            int affCnt = affNodes.size();

            if (ownerCnt > affCnt) {
              List<ClusterNode> sorted = new ArrayList<>(cctx.discovery().nodes(nodeIds));

              // Sort nodes by order, ascending.
              Collections.sort(sorted, CU.nodeComparator(true));

              int diff = sorted.size() - affCnt;

              for (int i = 0; i < diff; i++) {
                ClusterNode n = sorted.get(i);

                if (locId.equals(n.id())) {
                  part.rent(false);

                  updateLocal(part.id(), locId, part.state(), updateSeq);

                  changed = true;

                  if (log.isDebugEnabled())
                    log.debug(
                        "Evicted local partition (this node is oldest non-affinity node): " + part);

                  break;
                }
              }
            }
          }
        }
      }
    }

    return changed;
  }
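  /*
   * Illustrative sketch (not part of the original class): the selection rule applied
   * above when a partition has more owners than affinity nodes. Owners are sorted by
   * node order in ascending order and the first (ownerCnt - affCnt) of them give the
   * partition up. Node order is modeled as a plain long here; names are hypothetical.
   */
  private static List<Long> ownersToEvict(List<Long> ownerOrders, int affCnt) {
    List<Long> sorted = new ArrayList<>(ownerOrders);

    // Ascending node order, as with CU.nodeComparator(true) above.
    Collections.sort(sorted);

    int diff = sorted.size() - affCnt;

    return diff > 0 ? sorted.subList(0, diff) : Collections.<Long>emptyList();
  }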
  /** {@inheritDoc} */
  @Override
  public int hashCode() {
    return 31 * ((int) (reqId ^ (reqId >>> 32))) + nodeId.hashCode();
  }
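  /*
   * Illustrative sketch (not part of the original class): equals() and hashCode() above
   * identify a cancel message by the (reqId, nodeId) pair, and the hash folds the long
   * request ID into an int the same way Long.hashCode(long) does. A minimal standalone
   * key with the same contract, including an explicit type check, could look like this
   * (hypothetical class name).
   */
  private static final class MessageKey {
    private final long reqId;
    private final UUID nodeId;

    MessageKey(long reqId, UUID nodeId) {
      this.reqId = reqId;
      this.nodeId = nodeId;
    }

    @Override public boolean equals(Object obj) {
      if (obj == this) return true;

      if (!(obj instanceof MessageKey)) return false;

      MessageKey other = (MessageKey) obj;

      return reqId == other.reqId && nodeId.equals(other.nodeId);
    }

    @Override public int hashCode() {
      return 31 * Long.hashCode(reqId) + nodeId.hashCode();
    }
  }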