final void innerMoveToSecondPhase() throws Exception {
    // Only assemble the (potentially long) list of shard targets when trace logging is
    // actually enabled — building it otherwise would be wasted work.
    if (logger.isTraceEnabled()) {
      StringBuilder targets = new StringBuilder();
      String separator = "";
      for (int i = 0; i < firstResults.length(); i++) {
        FirstResult firstResult = firstResults.get(i);
        // a null slot means that shard failed in the first phase; skip it
        if (firstResult != null) {
          targets.append(separator).append(firstResult.shardTarget());
          separator = ",";
        }
      }
      logger.trace(
          "Moving to second phase, based on results from: {} (cluster state version: {})",
          targets,
          clusterStateVersion);
    }
    moveToSecondPhase();
  }
  protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
    // Store the per-shard result in its slot.
    firstResults.set(shardIndex, result);

    if (logger.isTraceEnabled()) {
      logger.trace("got first-phase result from {}", result == null ? null : result.shardTarget());
    }

    // Clear any failure previously recorded for this shard group. Access to a given
    // shardIndex is effectively serialized, so concurrently missing a failure created by
    // #addShardFailure is fine — by definition that happens on *another* shardIndex.
    AtomicArray<ShardSearchFailure> failures = this.shardFailures;
    if (failures != null) {
      failures.set(shardIndex, null);
    }
  }
 private void onFirstPhaseResult(
     int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) {
   // Tag the result with the shard it came from before recording it.
   result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId()));
   processFirstPhaseResult(shardIndex, result);
   // Increment successfulOps BEFORE the exit-condition check below: if we updated
   // totalOps first and were then preempted, a very fast second phase (e.g. count)
   // on another thread could read a stale successfulOps value.
   successfulOps.incrementAndGet();
   // Also fold the remaining ("future") shards of this iterator into totalOps, since
   // some of them may succeed and some may not — the exit condition breaks on totalOps,
   // so it must account for all of them.
   final int opsSoFar = totalOps.addAndGet(shardIt.remaining() + 1);
   if (opsSoFar == expectedTotalOps) {
     try {
       innerMoveToSecondPhase();
     } catch (Exception e) {
       if (logger.isDebugEnabled()) {
         logger.debug(
             (Supplier<?>)
                 () ->
                     new ParameterizedMessage(
                         "{}: Failed to execute [{}] while moving to second phase",
                         shardIt.shardId(),
                         request),
             e);
       }
       raiseEarlyFailure(
           new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
     }
   } else if (opsSoFar > expectedTotalOps) {
     // More ops than expected indicates a bookkeeping bug — fail the whole request.
     raiseEarlyFailure(
         new IllegalStateException(
             "unexpected higher total ops ["
                 + opsSoFar
                 + "] compared to expected ["
                 + expectedTotalOps
                 + "]"));
   }
 }