/** Creates a test environment with bin, config and plugins directories. */
 static Tuple<Path, Environment> createEnv(FileSystem fs, Function<String, Path> temp)
     throws IOException {
   Path home = temp.apply("install-plugin-command-tests");
   Files.createDirectories(home.resolve("bin"));
   Files.createFile(home.resolve("bin").resolve("elasticsearch"));
   Files.createDirectories(home.resolve("config"));
   Files.createFile(home.resolve("config").resolve("elasticsearch.yml"));
   Path plugins = Files.createDirectories(home.resolve("plugins"));
   assertTrue(Files.exists(plugins));
   Settings settings = Settings.builder().put("path.home", home).build();
   return Tuple.tuple(home, new Environment(settings));
 }
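  // Hedged usage sketch (not from the original code): the returned pair is unpacked with
  // v1()/v2(). "fileSystem" and "createTempDir" are hypothetical stand-ins for whatever the
  // enclosing test framework provides.
  Tuple<Path, Environment> env = createEnv(fileSystem, prefix -> createTempDir(prefix));
  Path home = env.v1();                 // the generated home directory
  Environment environment = env.v2();   // Environment built from the same path.home setting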
 private static Tuple<Integer, Integer> randomPrimariesAndReplicas(final int numNodes) {
   final int numPrimaries;
   final int numReplicas;
   if (randomBoolean()) {
     // test with some nodes having no shards
     numPrimaries = 1;
     numReplicas = randomIntBetween(0, numNodes - 2);
   } else {
     // test with all nodes having at least one shard
     numPrimaries = randomIntBetween(1, 5);
     numReplicas = numNodes - 1;
   }
   return Tuple.tuple(numPrimaries, numReplicas);
 }
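  // Hedged sketch (illustrative only): the two counts typically feed straight into index
  // settings for the test index; the keys below are standard Elasticsearch index settings.
  Tuple<Integer, Integer> shards = randomPrimariesAndReplicas(numNodes);
  Settings indexSettings = Settings.builder()
      .put("index.number_of_shards", shards.v1())
      .put("index.number_of_replicas", shards.v2())
      .build();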
 /**
  * Returns a tuple containing random stored field values and their corresponding expected values
  * once printed out via {@link
  * org.elasticsearch.common.xcontent.ToXContent#toXContent(XContentBuilder, ToXContent.Params)}
  * and parsed back via {@link org.elasticsearch.common.xcontent.XContentParser#objectText()}.
   * Only values that can be printed out are generated. Stored field values are retrieved from
   * Lucene and converted via {@link
  * org.elasticsearch.index.mapper.MappedFieldType#valueForDisplay(Object)} to either strings,
  * numbers or booleans.
  *
  * @param random Random generator
  * @param xContentType the content type, used to determine what the expected values are for float
  *     numbers.
  */
 public static Tuple<List<Object>, List<Object>> randomStoredFieldValues(
     Random random, XContentType xContentType) {
   int numValues = RandomNumbers.randomIntBetween(random, 1, 5);
   List<Object> originalValues = new ArrayList<>();
   List<Object> expectedParsedValues = new ArrayList<>();
   int dataType = RandomNumbers.randomIntBetween(random, 0, 8);
   for (int i = 0; i < numValues; i++) {
     switch (dataType) {
       case 0:
         long randomLong = random.nextLong();
         originalValues.add(randomLong);
         expectedParsedValues.add(randomLong);
         break;
       case 1:
         int randomInt = random.nextInt();
         originalValues.add(randomInt);
         expectedParsedValues.add(randomInt);
         break;
       case 2:
         Short randomShort = (short) random.nextInt();
         originalValues.add(randomShort);
         expectedParsedValues.add(randomShort.intValue());
         break;
       case 3:
         Byte randomByte = (byte) random.nextInt();
         originalValues.add(randomByte);
         expectedParsedValues.add(randomByte.intValue());
         break;
       case 4:
         double randomDouble = random.nextDouble();
         originalValues.add(randomDouble);
         expectedParsedValues.add(randomDouble);
         break;
       case 5:
         Float randomFloat = random.nextFloat();
         originalValues.add(randomFloat);
         if (xContentType == XContentType.CBOR) {
           // with CBOR we get back a float
           expectedParsedValues.add(randomFloat);
         } else if (xContentType == XContentType.SMILE) {
           // with SMILE we get back a double
           expectedParsedValues.add(randomFloat.doubleValue());
         } else {
            // with JSON and YAML we get back a double, but with float precision.
           expectedParsedValues.add(Double.parseDouble(randomFloat.toString()));
         }
         break;
       case 6:
         boolean randomBoolean = random.nextBoolean();
         originalValues.add(randomBoolean);
         expectedParsedValues.add(randomBoolean);
         break;
       case 7:
         String randomString =
             random.nextBoolean()
                 ? RandomStrings.randomAsciiOfLengthBetween(random, 3, 10)
                 : randomUnicodeOfLengthBetween(random, 3, 10);
         originalValues.add(randomString);
         expectedParsedValues.add(randomString);
         break;
       case 8:
         byte[] randomBytes =
             RandomStrings.randomUnicodeOfLengthBetween(random, 10, 50)
                 .getBytes(StandardCharsets.UTF_8);
         BytesArray randomBytesArray = new BytesArray(randomBytes);
         originalValues.add(randomBytesArray);
         if (xContentType == XContentType.JSON || xContentType == XContentType.YAML) {
           // JSON and YAML write the base64 format
           expectedParsedValues.add(Base64.getEncoder().encodeToString(randomBytes));
         } else {
           // SMILE and CBOR write the original bytes as they support binary format
           expectedParsedValues.add(randomBytesArray);
         }
         break;
       default:
         throw new UnsupportedOperationException();
     }
   }
   return Tuple.tuple(originalValues, expectedParsedValues);
 }
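  // Hedged sketch (illustrative only): the two lists line up index-for-index, so a consumer
  // writes the originals through the x-content layer and compares against the expected list.
  // "writeAndParse" is a hypothetical helper standing in for the XContentBuilder /
  // XContentParser#objectText() round trip described in the javadoc above.
  Tuple<List<Object>, List<Object>> values = randomStoredFieldValues(random, xContentType);
  List<Object> originals = values.v1();
  List<Object> expected = values.v2();
  for (int i = 0; i < originals.size(); i++) {
    assertEquals(expected.get(i), writeAndParse(originals.get(i), xContentType));
  }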
  static {
    buildersByType =
        MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
            .put("string", new PagedBytesIndexFieldData.Builder())
            .put("float", new FloatArrayIndexFieldData.Builder())
            .put("double", new DoubleArrayIndexFieldData.Builder())
            .put(
                "byte",
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.BYTE))
            .put(
                "short",
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.SHORT))
            .put(
                "int",
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.INT))
            .put(
                "long",
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.LONG))
            .put("geo_point", new GeoPointDoubleArrayIndexFieldData.Builder())
            .put(ParentFieldMapper.NAME, new ParentChildIndexFieldData.Builder())
            .put("binary", new DisabledIndexFieldData.Builder())
            .immutableMap();

    docValuesBuildersByType =
        MapBuilder.<String, IndexFieldData.Builder>newMapBuilder()
            .put("string", new DocValuesIndexFieldData.Builder())
            .put(
                "float",
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.FLOAT))
            .put(
                "double",
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.DOUBLE))
            .put(
                "byte",
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.BYTE))
            .put(
                "short",
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.SHORT))
            .put(
                "int",
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.INT))
            .put(
                "long",
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.LONG))
            .put("geo_point", new GeoPointBinaryDVIndexFieldData.Builder())
            .put("binary", new BytesBinaryDVIndexFieldData.Builder())
            .immutableMap();

    buildersByTypeAndFormat =
        MapBuilder.<Tuple<String, String>, IndexFieldData.Builder>newMapBuilder()
            .put(Tuple.tuple("string", PAGED_BYTES_FORMAT), new PagedBytesIndexFieldData.Builder())
            .put(Tuple.tuple("string", FST_FORMAT), new FSTBytesIndexFieldData.Builder())
            .put(Tuple.tuple("string", DOC_VALUES_FORMAT), new DocValuesIndexFieldData.Builder())
            .put(Tuple.tuple("string", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(Tuple.tuple("float", ARRAY_FORMAT), new FloatArrayIndexFieldData.Builder())
            .put(
                Tuple.tuple("float", DOC_VALUES_FORMAT),
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.FLOAT))
            .put(Tuple.tuple("float", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(Tuple.tuple("double", ARRAY_FORMAT), new DoubleArrayIndexFieldData.Builder())
            .put(
                Tuple.tuple("double", DOC_VALUES_FORMAT),
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.DOUBLE))
            .put(Tuple.tuple("double", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(
                Tuple.tuple("byte", ARRAY_FORMAT),
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.BYTE))
            .put(
                Tuple.tuple("byte", DOC_VALUES_FORMAT),
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.BYTE))
            .put(Tuple.tuple("byte", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(
                Tuple.tuple("short", ARRAY_FORMAT),
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.SHORT))
            .put(
                Tuple.tuple("short", DOC_VALUES_FORMAT),
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.SHORT))
            .put(Tuple.tuple("short", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(
                Tuple.tuple("int", ARRAY_FORMAT),
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.INT))
            .put(
                Tuple.tuple("int", DOC_VALUES_FORMAT),
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.INT))
            .put(Tuple.tuple("int", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(
                Tuple.tuple("long", ARRAY_FORMAT),
                new PackedArrayIndexFieldData.Builder()
                    .setNumericType(IndexNumericFieldData.NumericType.LONG))
            .put(
                Tuple.tuple("long", DOC_VALUES_FORMAT),
                new DocValuesIndexFieldData.Builder()
                    .numericType(IndexNumericFieldData.NumericType.LONG))
            .put(Tuple.tuple("long", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(
                Tuple.tuple("geo_point", ARRAY_FORMAT),
                new GeoPointDoubleArrayIndexFieldData.Builder())
            .put(
                Tuple.tuple("geo_point", DOC_VALUES_FORMAT),
                new GeoPointBinaryDVIndexFieldData.Builder())
            .put(Tuple.tuple("geo_point", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .put(
                Tuple.tuple("geo_point", COMPRESSED_FORMAT),
                new GeoPointCompressedIndexFieldData.Builder())
            .put(
                Tuple.tuple("binary", DOC_VALUES_FORMAT), new BytesBinaryDVIndexFieldData.Builder())
            .put(Tuple.tuple("binary", DISABLED_FORMAT), new DisabledIndexFieldData.Builder())
            .immutableMap();
  }
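  // The per-format map is keyed by a (type, format) Tuple; getForField below resolves a
  // builder with exactly this lookup before falling back to docValuesBuildersByType and
  // then buildersByType. For example:
  IndexFieldData.Builder builder =
      buildersByTypeAndFormat.get(Tuple.tuple("float", DOC_VALUES_FORMAT));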
  @SuppressWarnings("unchecked")
  public <IFD extends IndexFieldData<?>> IFD getForField(FieldMapper<?> mapper) {
    final FieldMapper.Names fieldNames = mapper.names();
    final FieldDataType type = mapper.fieldDataType();
    final boolean docValues = mapper.hasDocValues();
    IndexFieldData<?> fieldData = loadedFieldData.get(fieldNames.indexName());
    if (fieldData == null) {
      synchronized (loadedFieldData) {
        fieldData = loadedFieldData.get(fieldNames.indexName());
        if (fieldData == null) {
          IndexFieldData.Builder builder = null;
          String format = type.getFormat(indexSettings);
          if (format != null
              && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format)
              && !docValues) {
            logger.warn(
                "field ["
                    + fieldNames.fullName()
                    + "] has no doc values, will use default field data format");
            format = null;
          }
          if (format != null) {
            builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
            if (builder == null) {
              logger.warn(
                  "failed to find format ["
                      + format
                      + "] for field ["
                      + fieldNames.fullName()
                      + "], will use default");
            }
          }
          if (builder == null && docValues) {
            builder = docValuesBuildersByType.get(type.getType());
          }
          if (builder == null) {
            builder = buildersByType.get(type.getType());
          }
          if (builder == null) {
            throw new ElasticsearchIllegalArgumentException(
                "failed to find field data builder for field "
                    + fieldNames.fullName()
                    + ", and type "
                    + type.getType());
          }

          IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName());
          if (cache == null) {
            // we default to the node-level cache, which in turn defaults to being unbounded;
            // this means changing the node-level settings is simple: just set the bounds there
            String cacheType =
                type.getSettings().get("cache", indexSettings.get("index.fielddata.cache", "node"));
            if ("resident".equals(cacheType)) {
              cache =
                  new IndexFieldDataCache.Resident(
                      indexService, fieldNames, type, indicesFieldDataCacheListener);
            } else if ("soft".equals(cacheType)) {
              cache =
                  new IndexFieldDataCache.Soft(
                      indexService, fieldNames, type, indicesFieldDataCacheListener);
            } else if ("node".equals(cacheType)) {
              cache =
                  indicesFieldDataCache.buildIndexFieldDataCache(
                      indexService, index, fieldNames, type);
            } else {
              throw new ElasticsearchIllegalArgumentException(
                  "cache type not supported ["
                      + cacheType
                      + "] for field ["
                      + fieldNames.fullName()
                      + "]");
            }
            fieldDataCaches.put(fieldNames.indexName(), cache);
          }

          GlobalOrdinalsBuilder globalOrdinalBuilder =
              new InternalGlobalOrdinalsBuilder(index(), indexSettings);
          fieldData =
              builder.build(
                  index,
                  indexSettings,
                  mapper,
                  cache,
                  circuitBreakerService,
                  indexService.mapperService(),
                  globalOrdinalBuilder);
          loadedFieldData.put(fieldNames.indexName(), fieldData);
        }
      }
    }
    return (IFD) fieldData;
  }
  @SuppressWarnings("unchecked")
  public <IFD extends IndexFieldData<?>> IFD getForField(MappedFieldType fieldType) {
    final Names fieldNames = fieldType.names();
    final FieldDataType type = fieldType.fieldDataType();
    if (type == null) {
      throw new IllegalArgumentException(
          "found no fielddata type for field [" + fieldNames.fullName() + "]");
    }
    final boolean docValues = fieldType.hasDocValues();
    IndexFieldData.Builder builder = null;
    String format = type.getFormat(indexSettings);
    if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
      logger.warn(
          "field ["
              + fieldNames.fullName()
              + "] has no doc values, will use default field data format");
      format = null;
    }
    if (format != null) {
      builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
      if (builder == null) {
        logger.warn(
            "failed to find format ["
                + format
                + "] for field ["
                + fieldNames.fullName()
                + "], will use default");
      }
    }
    if (builder == null && docValues) {
      builder = docValuesBuildersByType.get(type.getType());
    }
    if (builder == null) {
      builder = buildersByType.get(type.getType());
    }
    if (builder == null) {
      throw new IllegalArgumentException(
          "failed to find field data builder for field "
              + fieldNames.fullName()
              + ", and type "
              + type.getType());
    }

    IndexFieldDataCache cache;
    synchronized (this) {
      cache = fieldDataCaches.get(fieldNames.indexName());
      if (cache == null) {
        // we default to the node-level cache, which in turn defaults to being unbounded;
        // this means changing the node-level settings is simple: just set the bounds there
        String cacheType =
            type.getSettings()
                .get("cache", indexSettings.get(FIELDDATA_CACHE_KEY, FIELDDATA_CACHE_VALUE_NODE));
        if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
          cache = indicesFieldDataCache.buildIndexFieldDataCache(listener, index, fieldNames, type);
        } else if ("none".equals(cacheType)) {
          cache = new IndexFieldDataCache.None();
        } else {
          throw new IllegalArgumentException(
              "cache type not supported ["
                  + cacheType
                  + "] for field ["
                  + fieldNames.fullName()
                  + "]");
        }
        fieldDataCaches.put(fieldNames.indexName(), cache);
      }
    }

    return (IFD)
        builder.build(index, indexSettings, fieldType, cache, circuitBreakerService, mapperService);
  }
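  // Hedged call-site sketch: because IFD is inferred from the assignment target, callers can
  // bind the result to a concrete field data type without an explicit cast. "fieldDataService"
  // and "longFieldType" are hypothetical names for the enclosing service and a long-valued
  // MappedFieldType.
  IndexNumericFieldData longFieldData = fieldDataService.getForField(longFieldType);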
 @Override
 protected PrimaryResponse<IngestShardResponse, IngestShardRequest> shardOperationOnPrimary(
     ClusterState clusterState, PrimaryOperationRequest shardRequest) {
   final IngestShardRequest request = shardRequest.request;
   IndexShard indexShard =
       indicesService
           .indexServiceSafe(shardRequest.request.index())
           .shardSafe(shardRequest.shardId);
   int successSize = 0;
   List<IngestItemFailure> failure = newLinkedList();
   int size = request.items().size();
   long[] versions = new long[size];
   Set<Tuple<String, String>> mappingsToUpdate = newHashSet();
   for (int i = 0; i < size; i++) {
     IngestItemRequest item = request.items().get(i);
     if (item.request() instanceof IndexRequest) {
       IndexRequest indexRequest = (IndexRequest) item.request();
       Engine.IndexingOperation op = null;
       try {
         // validate, if routing is required, that we got routing
         MappingMetaData mappingMd =
             clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
         if (mappingMd != null && mappingMd.routing().required()) {
           if (indexRequest.routing() == null) {
             throw new RoutingMissingException(
                 indexRequest.index(), indexRequest.type(), indexRequest.id());
           }
         }
         SourceToParse sourceToParse =
             SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source())
                 .type(indexRequest.type())
                 .id(indexRequest.id())
                 .routing(indexRequest.routing())
                 .parent(indexRequest.parent())
                 .timestamp(indexRequest.timestamp())
                 .ttl(indexRequest.ttl());
         long version;
         if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
           Engine.Index index =
               indexShard
                   .prepareIndex(sourceToParse)
                   .version(indexRequest.version())
                   .versionType(indexRequest.versionType())
                   .origin(Engine.Operation.Origin.PRIMARY);
           op = index;
           indexShard.index(index);
           version = index.version();
         } else {
           Engine.Create create =
               indexShard
                   .prepareCreate(sourceToParse)
                   .version(indexRequest.version())
                   .versionType(indexRequest.versionType())
                   .origin(Engine.Operation.Origin.PRIMARY);
           op = create;
           indexShard.create(create);
           version = create.version();
         }
         versions[i] = indexRequest.version();
         // update the version on request so it will happen on the replicas
         indexRequest.version(version);
         successSize++;
       } catch (Throwable e) {
          // rethrow the failure if we are going to retry on the primary and let the parent
          // failure handling deal with it
         if (retryPrimaryException(e)) {
           // restore updated versions...
           for (int j = 0; j < i; j++) {
             applyVersion(request.items().get(j), versions[j]);
           }
           logger.error(e.getMessage(), e);
           throw new ElasticsearchException(e.getMessage());
         }
          if (e instanceof ElasticsearchException
              && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
            // version conflicts are expected, log them at a lower level
            logger.trace(
                "[{}][{}] failed to execute bulk item (index) {}",
                e,
                shardRequest.request.index(),
                shardRequest.shardId,
                indexRequest);
          } else {
            logger.debug(
                "[{}][{}] failed to execute bulk item (index) {}",
                e,
                shardRequest.request.index(),
                shardRequest.shardId,
                indexRequest);
          }
         failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
         // nullify the request so it won't execute on the replicas
         request.items().set(i, null);
       } finally {
          // update the mapping on the master if needed; we won't re-record the same type, since
          // once it's changed, it won't have mappers added
         if (op != null && op.parsedDoc().mappingsModified()) {
           mappingsToUpdate.add(Tuple.tuple(indexRequest.index(), indexRequest.type()));
         }
       }
     } else if (item.request() instanceof DeleteRequest) {
       DeleteRequest deleteRequest = (DeleteRequest) item.request();
       try {
         Engine.Delete delete =
             indexShard
                 .prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version())
                 .versionType(deleteRequest.versionType())
                 .origin(Engine.Operation.Origin.PRIMARY);
         indexShard.delete(delete);
          // update the request with the version so it will go to the replicas
         deleteRequest.version(delete.version());
         successSize++;
       } catch (Throwable e) {
          // rethrow the failure if we are going to retry on the primary and let the parent
          // failure handling deal with it
         if (retryPrimaryException(e)) {
           // restore updated versions...
           for (int j = 0; j < i; j++) {
             applyVersion(request.items().get(j), versions[j]);
           }
           logger.error(e.getMessage(), e);
           throw new ElasticsearchException(e.getMessage());
         }
         if (e instanceof ElasticsearchException
             && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
           logger.trace(
               "[{}][{}] failed to execute bulk item (delete) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               deleteRequest);
         } else {
           logger.debug(
               "[{}][{}] failed to execute bulk item (delete) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               deleteRequest);
         }
         failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
         // nullify the request so it won't execute on the replicas
         request.items().set(i, null);
       }
     }
   }
   if (!mappingsToUpdate.isEmpty()) {
     for (Tuple<String, String> mappingToUpdate : mappingsToUpdate) {
       logger.info("mapping update {} {}", mappingToUpdate.v1(), mappingToUpdate.v2());
       updateMappingOnMaster(mappingToUpdate.v1(), mappingToUpdate.v2());
     }
   }
   IngestShardResponse response =
       new IngestShardResponse(
           new ShardId(request.index(), request.shardId()), successSize, failure);
   return new PrimaryResponse<IngestShardResponse, IngestShardRequest>(
       shardRequest.request, response, null);
 }
  public void testClusterStateBatchedUpdates() throws BrokenBarrierException, InterruptedException {
    AtomicInteger counter = new AtomicInteger();
    class Task {
      private AtomicBoolean state = new AtomicBoolean();
      private final int id;

      Task(int id) {
        this.id = id;
      }

      public void execute() {
        if (!state.compareAndSet(false, true)) {
          throw new IllegalStateException();
        } else {
          counter.incrementAndGet();
        }
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (o == null || getClass() != o.getClass()) {
          return false;
        }
        Task task = (Task) o;
        return id == task.id;
      }

      @Override
      public int hashCode() {
        return id;
      }

      @Override
      public String toString() {
        return Integer.toString(id);
      }
    }

    int numberOfThreads = randomIntBetween(2, 8);
    int taskSubmissionsPerThread = randomIntBetween(1, 64);
    int numberOfExecutors = Math.max(1, numberOfThreads / 4);
    final Semaphore semaphore = new Semaphore(numberOfExecutors);

    class TaskExecutor implements ClusterStateTaskExecutor<Task> {
      private final List<Set<Task>> taskGroups;
      private AtomicInteger counter = new AtomicInteger();
      private AtomicInteger batches = new AtomicInteger();
      private AtomicInteger published = new AtomicInteger();

      public TaskExecutor(List<Set<Task>> taskGroups) {
        this.taskGroups = taskGroups;
      }

      @Override
      public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks)
          throws Exception {
        for (Set<Task> expectedSet : taskGroups) {
          long count = tasks.stream().filter(expectedSet::contains).count();
          assertThat(
              "batched set should be executed together or not at all. Expected "
                  + expectedSet
                  + "s. Executing "
                  + tasks,
              count,
              anyOf(equalTo(0L), equalTo((long) expectedSet.size())));
        }
        tasks.forEach(Task::execute);
        counter.addAndGet(tasks.size());
        ClusterState maybeUpdatedClusterState = currentState;
        if (randomBoolean()) {
          maybeUpdatedClusterState = ClusterState.builder(currentState).build();
          batches.incrementAndGet();
          semaphore.acquire();
        }
        return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
      }

      @Override
      public boolean runOnlyOnMaster() {
        return false;
      }

      @Override
      public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {
        published.incrementAndGet();
        semaphore.release();
      }
    }

    ConcurrentMap<String, AtomicInteger> processedStates = new ConcurrentHashMap<>();

    List<Set<Task>> taskGroups = new ArrayList<>();
    List<TaskExecutor> executors = new ArrayList<>();
    for (int i = 0; i < numberOfExecutors; i++) {
      executors.add(new TaskExecutor(taskGroups));
    }

    // randomly assign tasks to executors
    List<Tuple<TaskExecutor, Set<Task>>> assignments = new ArrayList<>();
    int taskId = 0;
    for (int i = 0; i < numberOfThreads; i++) {
      for (int j = 0; j < taskSubmissionsPerThread; j++) {
        TaskExecutor executor = randomFrom(executors);
        Set<Task> tasks = new HashSet<>();
        for (int t = randomInt(3); t >= 0; t--) {
          tasks.add(new Task(taskId++));
        }
        taskGroups.add(tasks);
        assignments.add(Tuple.tuple(executor, tasks));
      }
    }

    Map<TaskExecutor, Integer> counts = new HashMap<>();
    int totalTaskCount = 0;
    for (Tuple<TaskExecutor, Set<Task>> assignment : assignments) {
      final int taskCount = assignment.v2().size();
      counts.merge(assignment.v1(), taskCount, (previous, count) -> previous + count);
      totalTaskCount += taskCount;
    }
    final CountDownLatch updateLatch = new CountDownLatch(totalTaskCount);
    final ClusterStateTaskListener listener =
        new ClusterStateTaskListener() {
          @Override
          public void onFailure(String source, Exception e) {
            fail(ExceptionsHelper.detailedMessage(e));
          }

          @Override
          public void clusterStateProcessed(
              String source, ClusterState oldState, ClusterState newState) {
            processedStates.computeIfAbsent(source, key -> new AtomicInteger()).incrementAndGet();
            updateLatch.countDown();
          }
        };

    final ConcurrentMap<String, AtomicInteger> submittedTasksPerThread = new ConcurrentHashMap<>();
    CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
    for (int i = 0; i < numberOfThreads; i++) {
      final int index = i;
      Thread thread =
          new Thread(
              () -> {
                final String threadName = Thread.currentThread().getName();
                try {
                  barrier.await();
                  for (int j = 0; j < taskSubmissionsPerThread; j++) {
                    Tuple<TaskExecutor, Set<Task>> assignment =
                        assignments.get(index * taskSubmissionsPerThread + j);
                    final Set<Task> tasks = assignment.v2();
                    submittedTasksPerThread
                        .computeIfAbsent(threadName, key -> new AtomicInteger())
                        .addAndGet(tasks.size());
                    final TaskExecutor executor = assignment.v1();
                    if (tasks.size() == 1) {
                      clusterService.submitStateUpdateTask(
                          threadName,
                          tasks.stream().findFirst().get(),
                          ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                          executor,
                          listener);
                    } else {
                      Map<Task, ClusterStateTaskListener> taskListeners = new HashMap<>();
                      tasks.stream().forEach(t -> taskListeners.put(t, listener));
                      clusterService.submitStateUpdateTasks(
                          threadName,
                          taskListeners,
                          ClusterStateTaskConfig.build(randomFrom(Priority.values())),
                          executor);
                    }
                  }
                  barrier.await();
                } catch (BrokenBarrierException | InterruptedException e) {
                  throw new AssertionError(e);
                }
              });
      thread.start();
    }

    // wait for all threads to be ready
    barrier.await();
    // wait for all threads to finish
    barrier.await();

    // wait until all the cluster state updates have been processed
    updateLatch.await();
    // and until all of the publication callbacks have completed
    semaphore.acquire(numberOfExecutors);

    // assert the number of executed tasks is correct
    assertEquals(totalTaskCount, counter.get());

    // assert each executor executed the correct number of tasks
    for (TaskExecutor executor : executors) {
      if (counts.containsKey(executor)) {
        assertEquals((int) counts.get(executor), executor.counter.get());
        assertEquals(executor.batches.get(), executor.published.get());
      }
    }

    // assert the correct number of clusterStateProcessed events were triggered
    for (Map.Entry<String, AtomicInteger> entry : processedStates.entrySet()) {
      assertThat(submittedTasksPerThread, hasKey(entry.getKey()));
      assertEquals(
          "not all tasks submitted by " + entry.getKey() + " received a processed event",
          entry.getValue().get(),
          submittedTasksPerThread.get(entry.getKey()).get());
    }
  }