@Test
  public void testBenchmarkWithErrors() {
    List<SearchRequest> reqList = new ArrayList<>();
    int numQueries = scaledRandomIntBetween(20, 100);
    int numErrors = scaledRandomIntBetween(1, numQueries);
    final boolean containsFatal = randomBoolean();
    if (containsFatal) {
      ScriptScoreFunctionBuilder scriptFunction =
          scriptFunction("DOES NOT COMPILE - fails on any shard");
      SearchRequest searchRequest =
          searchRequest()
              .source(
                  searchSource()
                      .query(functionScoreQuery(FilterBuilders.matchAllFilter(), scriptFunction)));
      reqList.add(searchRequest);
    }
    while (reqList.size() < numErrors) {
      ScriptScoreFunctionBuilder scriptFunction = scriptFunction("throw new RuntimeException();");
      SearchRequest searchRequest =
          searchRequest()
              .source(
                  searchSource()
                      .query(functionScoreQuery(FilterBuilders.matchAllFilter(), scriptFunction)));
      reqList.add(searchRequest);
    }
    logger.info("--> run with [{}] errors ", numErrors);
    while (reqList.size() < numQueries) {
      reqList.add(BenchmarkTestUtil.randomSearch(client(), indices));
    }
    Collections.shuffle(reqList, getRandom());

    final BenchmarkRequest request =
        BenchmarkTestUtil.randomRequest(
            client(),
            indices,
            numExecutorNodes,
            competitionSettingsMap,
            reqList.toArray(new SearchRequest[0]));
    logger.info(
        "--> Submitting benchmark - competitors [{}] iterations [{}]",
        request.competitors().size(),
        request.settings().iterations());
    final BenchmarkResponse response = client().bench(request).actionGet();

    assertThat(response, notNullValue());
    if (response.hasErrors() || containsFatal) {
      assertThat(response.state(), equalTo(BenchmarkResponse.State.FAILED));
    } else {
      assertThat(response.state(), equalTo(BenchmarkResponse.State.COMPLETE));
      for (CompetitionResult result : response.competitionResults().values()) {
        assertThat(result.nodeResults().size(), equalTo(numExecutorNodes));
        validateCompetitionResult(
            result, competitionSettingsMap.get(result.competitionName()), true);
      }
    }
    assertThat(response.benchmarkName(), equalTo(BENCHMARK_NAME));
  }
  @Test
  public void testCollectDocId() throws Exception {
    setUpCharacters();
    Planner.Context plannerContext = new Planner.Context(clusterService(), UUID.randomUUID());
    CollectPhase collectNode = createCollectNode(plannerContext, false);

    List<Bucket> results = getBuckets(collectNode);

    assertThat(results.size(), is(2));
    int seenJobSearchContextId = -1;
    for (Bucket rows : results) {
      assertThat(rows.size(), is(1));
      Object docIdCol = rows.iterator().next().get(0);
      assertNotNull(docIdCol);
      assertThat(docIdCol, instanceOf(Long.class));
      long docId = (long) docIdCol;
      // unpack jobSearchContextId and reader doc id from docId
      int jobSearchContextId = (int) (docId >> 32);
      int doc = (int) docId;
      assertThat(doc, is(0));
      assertThat(jobSearchContextId, greaterThan(-1));
      if (seenJobSearchContextId == -1) {
        assertThat(jobSearchContextId, anyOf(is(0), is(1)));
        seenJobSearchContextId = jobSearchContextId;
      } else {
        assertThat(jobSearchContextId, is(seenJobSearchContextId == 0 ? 1 : 0));
      }
    }
  }
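A minimal sketch of the docId bit-packing that testCollectDocId above (and testFetchAction further below) unpacks with (int) (docId >> 32) and (int) docId; the helper name packDocId and the explicit low-32-bit mask are illustrative assumptions, not part of the original test code.

  // Hypothetical helper: jobSearchContextId occupies the high 32 bits and the reader-local
  // doc id the low 32 bits, so the test's shifts and casts recover both values.
  static long packDocId(int jobSearchContextId, int doc) {
    return ((long) jobSearchContextId << 32) | (doc & 0xFFFFFFFFL);
  }
  // Example: packDocId(1, 0) == 4294967296L; (int) (4294967296L >> 32) == 1 and (int) 4294967296L == 0.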
 private String[] randomIndicesOrAliases() {
   int count = randomIntBetween(1, indices.size() * 2); // every index has an alias
   String[] indices = new String[count];
   for (int i = 0; i < count; i++) {
     indices[i] = randomIndexOrAlias();
   }
   return indices;
 }
  @Test(expected = UnexpectedInvocation.class) // Uses of JMockit API: 2
  public void verifyThatInvocationsNeverHappenedWhenTheyDid(@Mocked List<String> mockTwo) {
    mockedList.add("one");
    mockTwo.size();

    new FullVerifications() {
      {
        mockedList.add("one");
      }
    };
  }
  @Test(expected = UnexpectedInvocation.class) // Uses of JMockit API: 1
  public void verifyAllInvocationsInOrderWhenMoreOfThemHappen() {
    mockedList.add("one");
    mockedList.add("two");
    mockedList.size();

    new FullVerificationsInOrder() {
      {
        mockedList.add("one");
        mockedList.add("two");
      }
    };
  }
  @Test // Uses of JMockit API: 1
  public void verifyAllInvocationsInOrder() {
    mockedList.add("one");
    mockedList.size();
    mockedList.add("two");

    new FullVerificationsInOrder() {
      {
        mockedList.add("one");
        mockedList.size();
        mockedList.add("two");
      }
    };
  }
  @Test
  public void testFetchProjection() throws Exception {
    setUpCharacters();

    Plan plan = analyzeAndPlan("select id, name, substr(name, 2) from characters order by id");
    assertThat(plan, instanceOf(QueryThenFetch.class));
    QueryThenFetch qtf = (QueryThenFetch) plan;

    assertThat(qtf.collectNode().keepContextForFetcher(), is(true));
    assertThat(
        ((FetchProjection) qtf.mergeNode().projections().get(1)).jobSearchContextIdToNode(),
        notNullValue());
    assertThat(
        ((FetchProjection) qtf.mergeNode().projections().get(1)).jobSearchContextIdToShard(),
        notNullValue());

    Job job = executor.newJob(plan);
    ListenableFuture<List<TaskResult>> results = Futures.allAsList(executor.execute(job));

    final List<Object[]> resultingRows = new ArrayList<>();
    final CountDownLatch latch = new CountDownLatch(1);
    Futures.addCallback(
        results,
        new FutureCallback<List<TaskResult>>() {
          @Override
          public void onSuccess(List<TaskResult> resultList) {
            for (Row row : resultList.get(0).rows()) {
              resultingRows.add(row.materialize());
            }
            latch.countDown();
          }

          @Override
          public void onFailure(Throwable t) {
            latch.countDown();
            fail(t.getMessage());
          }
        });

    latch.await();
    assertThat(resultingRows.size(), is(2));
    assertThat(resultingRows.get(0).length, is(3));
    assertThat((Integer) resultingRows.get(0)[0], is(1));
    assertThat((BytesRef) resultingRows.get(0)[1], is(new BytesRef("Arthur")));
    assertThat((BytesRef) resultingRows.get(0)[2], is(new BytesRef("rthur")));
    assertThat((Integer) resultingRows.get(1)[0], is(2));
    assertThat((BytesRef) resultingRows.get(1)[1], is(new BytesRef("Ford")));
    assertThat((BytesRef) resultingRows.get(1)[2], is(new BytesRef("ord")));
  }
Example #8
  public OrderWS buildOrder(int userId, List<Integer> itemIds, BigDecimal linePrice) {
    OrderWS order = new OrderWS();
    order.setUserId(userId);
    order.setBillingTypeId(Constants.ORDER_BILLING_POST_PAID);
    order.setPeriod(ORDER_PERIOD_ONCE); // once
    order.setCurrencyId(CURRENCY_USD);
    order.setActiveSince(new Date());
    order.setProrateFlag(Boolean.FALSE);

    ArrayList<OrderLineWS> lines = new ArrayList<OrderLineWS>(itemIds.size());
    for (int i = 0; i < itemIds.size(); i++) {
      OrderLineWS nextLine = new OrderLineWS();
      nextLine.setTypeId(Constants.ORDER_LINE_TYPE_ITEM);
      nextLine.setDescription("Order line: " + i);
      nextLine.setItemId(itemIds.get(i));
      nextLine.setQuantity(1);
      nextLine.setPrice(linePrice);
      nextLine.setAmount(nextLine.getQuantityAsDecimal().multiply(linePrice));

      lines.add(nextLine);
    }
    order.setOrderLines(lines.toArray(new OrderLineWS[lines.size()]));
    return order;
  }
 private static void assertIndicesSubset(List<String> indices, String... actions) {
   // indices returned by each bulk shard request need to be a subset of the original indices
   for (String action : actions) {
     List<TransportRequest> requests = consumeTransportRequests(action);
     assertThat(
         "no internal requests intercepted for action [" + action + "]",
         requests.size(),
         greaterThan(0));
     for (TransportRequest internalRequest : requests) {
       assertThat(internalRequest, instanceOf(IndicesRequest.class));
       for (String index : ((IndicesRequest) internalRequest).indices()) {
         assertThat(indices, hasItem(index));
       }
     }
   }
 }
  @Test
  public void testRetrieveAllFunds() {
    FundsHelper fh =
        FundsHelper.create(Utils.randomNameGenerator("", 10))
            .externalId(Utils.randomNameGenerator("fund-", 5))
            .build();
    String jsonData = fh.toJSON();

    final Long fundID = createFund(jsonData, this.requestSpec, this.statusOkResponseSpec);
    Assert.assertNotNull(fundID);

    List<FundsHelper> fhList =
        FundsResourceHandler.retrieveAllFunds(this.requestSpec, this.statusOkResponseSpec);

    Assert.assertNotNull(fhList);
    Assert.assertThat(fhList.size(), greaterThanOrEqualTo(1));
    Assert.assertThat(fhList, hasItem(fh));
  }
  @Test
  public void reads_a_list_of_entries() throws Exception {
    ZipContents zipContents = new ZipContents(new File("./src/test/resource/test.zip"));

    try {
      zipContents.open();

      List<ZipContentsEntry> entries = zipContents.entries();

      assertThat(entries.size(), is(6));
      ZipContentsEntry firstEntry = entries.get(0);
      assertThat(firstEntry.directory(), is("/"));
      assertThat(firstEntry.name(), is("file_1.txt"));
      assertThat(firstEntry.type(), is("txt"));
    } finally {
      zipContents.close();
    }
  }
Example #12
  private Integer getOrCreateSuspendedStatus(JbillingAPI api) {
    // Arrays.asList returns a fixed-size list, so copy it into an ArrayList before adding a step.
    List<AgeingWS> steps =
        new ArrayList<>(Arrays.asList(api.getAgeingConfiguration(LANGUAGE_ID)));

    for (AgeingWS step : steps) {
      if (step.getSuspended().booleanValue()) {
        return step.getStatusId();
      }
    }

    AgeingWS suspendStep = new AgeingWS();
    suspendStep.setSuspended(Boolean.TRUE);
    suspendStep.setDays(Integer.valueOf(180));
    suspendStep.setStatusStr("Ageing Step 180");
    suspendStep.setFailedLoginMessage("You are suspended");
    suspendStep.setWelcomeMessage("Welcome");
    steps.add(suspendStep);
    api.saveAgeingConfiguration(steps.toArray(new AgeingWS[steps.size()]), LANGUAGE_ID);
    return getOrCreateOrderChangeStatusApply(api);
  }
Example #13
 private static void assertSameIndices(
     IndicesRequest originalRequest, boolean optional, String... actions) {
   for (String action : actions) {
     List<TransportRequest> requests = consumeTransportRequests(action);
     if (!optional) {
       assertThat(
           "no internal requests intercepted for action [" + action + "]",
           requests.size(),
           greaterThan(0));
     }
     for (TransportRequest internalRequest : requests) {
       assertThat(internalRequest, instanceOf(IndicesRequest.class));
       assertThat(
           internalRequest.getClass().getName(),
           ((IndicesRequest) internalRequest).indices(),
           equalTo(originalRequest.indices()));
       assertThat(
           ((IndicesRequest) internalRequest).indicesOptions(),
           equalTo(originalRequest.indicesOptions()));
     }
   }
 }
Example #14
  @Test
  @Slow
  public void testConcurrentUpdateWithRetryOnConflict() throws Exception {
    final boolean useBulkApi = randomBoolean();
    createIndex();
    ensureGreen();

    int numberOfThreads = scaledRandomIntBetween(2, 5);
    final CountDownLatch latch = new CountDownLatch(numberOfThreads);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final int numberOfUpdatesPerThread = scaledRandomIntBetween(100, 10000);
    final List<Throwable> failures = new CopyOnWriteArrayList<>();
    for (int i = 0; i < numberOfThreads; i++) {
      Runnable r =
          new Runnable() {

            @Override
            public void run() {
              try {
                startLatch.await();
                for (int i = 0; i < numberOfUpdatesPerThread; i++) {
                  if (useBulkApi) {
                    UpdateRequestBuilder updateRequestBuilder =
                        client()
                            .prepareUpdate("test", "type1", Integer.toString(i))
                            .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
                            .setRetryOnConflict(Integer.MAX_VALUE)
                            .setUpsert(jsonBuilder().startObject().field("field", 1).endObject());
                    client().prepareBulk().add(updateRequestBuilder).execute().actionGet();
                  } else {
                    client()
                        .prepareUpdate("test", "type1", Integer.toString(i))
                        .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
                        .setRetryOnConflict(Integer.MAX_VALUE)
                        .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
                        .execute()
                        .actionGet();
                  }
                }
              } catch (Throwable e) {
                failures.add(e);
              } finally {
                latch.countDown();
              }
            }
          };
      new Thread(r).start();
    }
    startLatch.countDown();
    latch.await();
    for (Throwable throwable : failures) {
      logger.info("Captured failure on concurrent update:", throwable);
    }
    assertThat(failures.size(), equalTo(0));
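    // Each thread performs exactly one scripted update per document id, with unlimited retries
    // on version conflicts, so every document should end up with version == numberOfThreads and
    // "field" == numberOfThreads.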
    for (int i = 0; i < numberOfUpdatesPerThread; i++) {
      GetResponse response =
          client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
      assertThat(response.getId(), equalTo(Integer.toString(i)));
      assertThat(response.isExists(), equalTo(true));
      assertThat(response.getVersion(), equalTo((long) numberOfThreads));
      assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads));
    }
  }
  @Test
  public void testFetchAction() throws Exception {
    setUpCharacters();

    Analysis analysis = analyze("select id, name from characters");
    QueryThenFetchConsumer queryThenFetchConsumer =
        internalCluster().getInstance(QueryThenFetchConsumer.class);
    Planner.Context plannerContext = new Planner.Context(clusterService(), UUID.randomUUID());
    ConsumerContext consumerContext = new ConsumerContext(analysis.rootRelation(), plannerContext);
    QueryThenFetch plan =
        (QueryThenFetch)
            queryThenFetchConsumer.consume(analysis.rootRelation(), consumerContext).plan();

    List<Bucket> results = getBuckets(plan.collectNode());

    TransportFetchNodeAction transportFetchNodeAction =
        internalCluster().getInstance(TransportFetchNodeAction.class);

    // extract docIds by nodeId and jobSearchContextId
    Map<String, LongArrayList> jobSearchContextDocIds = new HashMap<>();
    for (Bucket rows : results) {
      long docId = (long) rows.iterator().next().get(0);
      // unpack jobSearchContextId and reader doc id from docId
      int jobSearchContextId = (int) (docId >> 32);
      String nodeId = plannerContext.nodeId(jobSearchContextId);
      LongArrayList docIdsPerNode = jobSearchContextDocIds.get(nodeId);
      if (docIdsPerNode == null) {
        docIdsPerNode = new LongArrayList();
        jobSearchContextDocIds.put(nodeId, docIdsPerNode);
      }
      docIdsPerNode.add(docId);
    }

    Iterable<Projection> projections =
        Iterables.filter(
            plan.mergeNode().projections(), Predicates.instanceOf(FetchProjection.class));
    FetchProjection fetchProjection = (FetchProjection) Iterables.getOnlyElement(projections);
    RowInputSymbolVisitor rowInputSymbolVisitor =
        new RowInputSymbolVisitor(internalCluster().getInstance(Functions.class));
    RowInputSymbolVisitor.Context context =
        rowInputSymbolVisitor.extractImplementations(fetchProjection.outputSymbols());

    final CountDownLatch latch = new CountDownLatch(jobSearchContextDocIds.size());
    final List<Row> rows = new ArrayList<>();
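    // Issue one NodeFetchRequest per node that owns search contexts; the latch waits for a
    // response (or failure) from each node before the collected rows are asserted on.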
    for (Map.Entry<String, LongArrayList> nodeEntry : jobSearchContextDocIds.entrySet()) {
      NodeFetchRequest nodeFetchRequest = new NodeFetchRequest();
      nodeFetchRequest.jobId(plan.collectNode().jobId());
      nodeFetchRequest.executionPhaseId(plan.collectNode().executionPhaseId());
      nodeFetchRequest.toFetchReferences(context.references());
      nodeFetchRequest.closeContext(true);
      nodeFetchRequest.jobSearchContextDocIds(nodeEntry.getValue());

      transportFetchNodeAction.execute(
          nodeEntry.getKey(),
          nodeFetchRequest,
          new ActionListener<NodeFetchResponse>() {
            @Override
            public void onResponse(NodeFetchResponse nodeFetchResponse) {
              for (Row row : nodeFetchResponse.rows()) {
                rows.add(row);
              }
              latch.countDown();
            }

            @Override
            public void onFailure(Throwable e) {
              latch.countDown();
              fail(e.getMessage());
            }
          });
    }
    latch.await();

    assertThat(rows.size(), is(2));
    for (Row row : rows) {
      assertThat((Integer) row.get(0), anyOf(is(1), is(2)));
      assertThat(
          (BytesRef) row.get(1), anyOf(is(new BytesRef("Arthur")), is(new BytesRef("Ford"))));
    }
  }
 boolean matches(List<?> list) {
   return list.size() == 2;
 }
  @Test
  public void testPendingUpdateTask() throws Exception {
    Settings settings = settingsBuilder().put("discovery.type", "local").build();
    String node_0 = internalCluster().startNode(settings);
    internalCluster().startNodeClient(settings);

    final ClusterService clusterService =
        internalCluster().getInstance(ClusterService.class, node_0);
    final CountDownLatch block1 = new CountDownLatch(1);
    final CountDownLatch invoked1 = new CountDownLatch(1);
    clusterService.submitStateUpdateTask(
        "1",
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) {
            invoked1.countDown();
            try {
              block1.await();
            } catch (InterruptedException e) {
              fail();
            }
            return currentState;
          }

          @Override
          public void onFailure(String source, Throwable t) {
            invoked1.countDown();
            fail();
          }
        });
    invoked1.await();
    final CountDownLatch invoked2 = new CountDownLatch(9);
    for (int i = 2; i <= 10; i++) {
      clusterService.submitStateUpdateTask(
          Integer.toString(i),
          new ProcessedClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
              return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
              fail();
            }

            @Override
            public void clusterStateProcessed(
                String source, ClusterState oldState, ClusterState newState) {
              invoked2.countDown();
            }
          });
    }

    // There might be other tasks on this node; make sure to only take the ones we added in this
    // test into account.

    // The tasks can be re-ordered, so we need to check out-of-order
    Set<String> controlSources =
        new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
    List<PendingClusterTask> pendingClusterTasks = clusterService.pendingTasks();
    assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(10));
    assertThat(pendingClusterTasks.get(0).getSource().string(), equalTo("1"));
    assertThat(pendingClusterTasks.get(0).isExecuting(), equalTo(true));
    for (PendingClusterTask task : pendingClusterTasks) {
      controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());

    controlSources =
        new HashSet<>(Arrays.asList("1", "2", "3", "4", "5", "6", "7", "8", "9", "10"));
    PendingClusterTasksResponse response =
        internalCluster()
            .clientNodeClient()
            .admin()
            .cluster()
            .preparePendingClusterTasks()
            .execute()
            .actionGet();
    assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(10));
    assertThat(response.pendingTasks().get(0).getSource().string(), equalTo("1"));
    assertThat(response.pendingTasks().get(0).isExecuting(), equalTo(true));
    for (PendingClusterTask task : response) {
      controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());
    block1.countDown();
    invoked2.await();

    // whenever we test for no tasks, we need to awaitBusy since this is a live node
    assertTrue(
        awaitBusy(
            new Predicate<Object>() {
              @Override
              public boolean apply(Object input) {
                return clusterService.pendingTasks().isEmpty();
              }
            }));
    waitNoPendingTasksOnAll();

    final CountDownLatch block2 = new CountDownLatch(1);
    final CountDownLatch invoked3 = new CountDownLatch(1);
    clusterService.submitStateUpdateTask(
        "1",
        new ClusterStateUpdateTask() {
          @Override
          public ClusterState execute(ClusterState currentState) {
            invoked3.countDown();
            try {
              block2.await();
            } catch (InterruptedException e) {
              fail();
            }
            return currentState;
          }

          @Override
          public void onFailure(String source, Throwable t) {
            invoked3.countDown();
            fail();
          }
        });
    invoked3.await();

    for (int i = 2; i <= 5; i++) {
      clusterService.submitStateUpdateTask(
          Integer.toString(i),
          new ClusterStateUpdateTask() {
            @Override
            public ClusterState execute(ClusterState currentState) {
              return currentState;
            }

            @Override
            public void onFailure(String source, Throwable t) {
              fail();
            }
          });
    }
    Thread.sleep(100);

    pendingClusterTasks = clusterService.pendingTasks();
    assertThat(pendingClusterTasks.size(), greaterThanOrEqualTo(5));
    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
    for (PendingClusterTask task : pendingClusterTasks) {
      controlSources.remove(task.getSource().string());
    }
    assertTrue(controlSources.isEmpty());

    response =
        internalCluster().clientNodeClient().admin().cluster().preparePendingClusterTasks().get();
    assertThat(response.pendingTasks().size(), greaterThanOrEqualTo(5));
    controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
    for (PendingClusterTask task : response) {
      if (controlSources.remove(task.getSource().string())) {
        assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
      }
    }
    assertTrue(controlSources.isEmpty());
    block2.countDown();
  }
Example #18
  @Test
  @Slow
  public void stressUpdateDeleteConcurrency() throws Exception {
    // We create an index with merging disabled so that deletes don't get merged away
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .addMapping(
            "type1",
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject("type1")
                .startObject("_timestamp")
                .field("enabled", true)
                .field("store", "yes")
                .endObject()
                .startObject("_ttl")
                .field("enabled", true)
                .field("store", "yes")
                .endObject()
                .endObject()
                .endObject())
        .setSettings(
            ImmutableSettings.builder()
                .put(MergePolicyModule.MERGE_POLICY_TYPE_KEY, NoMergePolicyProvider.class))
        .execute()
        .actionGet();
    ensureGreen();

    final int numberOfThreads = scaledRandomIntBetween(5, 10);
    final int numberOfIdsPerThread = scaledRandomIntBetween(3, 10);
    final int numberOfUpdatesPerId = scaledRandomIntBetween(100, 200);
    final int retryOnConflict = randomIntBetween(0, 1);
    final CountDownLatch latch = new CountDownLatch(numberOfThreads);
    final CountDownLatch startLatch = new CountDownLatch(1);
    final List<Throwable> failures = new CopyOnWriteArrayList<>();

    final class UpdateThread extends Thread {
      final Map<Integer, Integer> failedMap = new HashMap<>();
      final int numberOfIds;
      final int updatesPerId;
      final int maxUpdateRequests = numberOfIdsPerThread * numberOfUpdatesPerId;
      final int maxDeleteRequests = numberOfIdsPerThread * numberOfUpdatesPerId;
      private final Semaphore updateRequestsOutstanding = new Semaphore(maxUpdateRequests);
      private final Semaphore deleteRequestsOutstanding = new Semaphore(maxDeleteRequests);
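      // Every async update/delete acquires a permit before it is sent and releases it in its
      // listener; waitForOutstandingRequests() later re-acquires all permits to make sure each
      // request has received a response before the thread finishes.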

      public UpdateThread(int numberOfIds, int updatesPerId) {
        this.numberOfIds = numberOfIds;
        this.updatesPerId = updatesPerId;
      }

      final class UpdateListener implements ActionListener<UpdateResponse> {
        int id;

        public UpdateListener(int id) {
          this.id = id;
        }

        @Override
        public void onResponse(UpdateResponse updateResponse) {
          updateRequestsOutstanding.release(1);
        }

        @Override
        public void onFailure(Throwable e) {
          synchronized (failedMap) {
            incrementMapValue(id, failedMap);
          }
          updateRequestsOutstanding.release(1);
        }
      }

      final class DeleteListener implements ActionListener<DeleteResponse> {
        int id;

        public DeleteListener(int id) {
          this.id = id;
        }

        @Override
        public void onResponse(DeleteResponse deleteResponse) {
          deleteRequestsOutstanding.release(1);
        }

        @Override
        public void onFailure(Throwable e) {
          synchronized (failedMap) {
            incrementMapValue(id, failedMap);
          }
          deleteRequestsOutstanding.release(1);
        }
      }

      @Override
      public void run() {
        try {
          startLatch.await();
          for (int j = 0; j < numberOfIds; j++) {
            for (int k = 0; k < numberOfUpdatesPerId; ++k) {
              updateRequestsOutstanding.acquire();
              UpdateRequest ur =
                  client()
                      .prepareUpdate("test", "type1", Integer.toString(j))
                      .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
                      .setRetryOnConflict(retryOnConflict)
                      .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
                      .setListenerThreaded(false)
                      .request();
              client().update(ur, new UpdateListener(j));

              deleteRequestsOutstanding.acquire();
              DeleteRequest dr =
                  client()
                      .prepareDelete("test", "type1", Integer.toString(j))
                      .setListenerThreaded(false)
                      .setOperationThreaded(false)
                      .request();
              client().delete(dr, new DeleteListener(j));
            }
          }
        } catch (Throwable e) {
          logger.error("Something went wrong", e);
          failures.add(e);
        } finally {
          try {
            waitForOutstandingRequests(
                TimeValue.timeValueSeconds(60),
                updateRequestsOutstanding,
                maxUpdateRequests,
                "Update");
            waitForOutstandingRequests(
                TimeValue.timeValueSeconds(60),
                deleteRequestsOutstanding,
                maxDeleteRequests,
                "Delete");
          } catch (ElasticsearchTimeoutException ete) {
            failures.add(ete);
          }
          latch.countDown();
        }
      }

      private void incrementMapValue(int j, Map<Integer, Integer> map) {
        if (!map.containsKey(j)) {
          map.put(j, 0);
        }
        map.put(j, map.get(j) + 1);
      }

      private void waitForOutstandingRequests(
          TimeValue timeOut, Semaphore requestsOutstanding, int maxRequests, String name) {
        long start = System.currentTimeMillis();
        do {
          long msRemaining = timeOut.getMillis() - (System.currentTimeMillis() - start);
          logger.info(
              "[{}] going to try and aquire [{}] in [{}]ms [{}] available to aquire right now",
              name,
              maxRequests,
              msRemaining,
              requestsOutstanding.availablePermits());
          try {
            requestsOutstanding.tryAcquire(maxRequests, msRemaining, TimeUnit.MILLISECONDS);
            return;
          } catch (InterruptedException ie) {
            // Just keep swimming
          }
        } while ((System.currentTimeMillis() - start) < timeOut.getMillis());
        throw new ElasticsearchTimeoutException(
            "Requests were still outstanding after the timeout ["
                + timeOut
                + "] for type ["
                + name
                + "]");
      }
    }
    final List<UpdateThread> threads = new ArrayList<>();

    for (int i = 0; i < numberOfThreads; i++) {
      UpdateThread ut = new UpdateThread(numberOfIdsPerThread, numberOfUpdatesPerId);
      ut.start();
      threads.add(ut);
    }

    startLatch.countDown();
    latch.await();

    for (UpdateThread ut : threads) {
      ut.join(); // Threads should have finished because of the latch.await
    }

    // If there are no errors, every request received a response; otherwise the test would have
    // timed out acquiring the outstanding-request semaphores.
    for (Throwable throwable : failures) {
      logger.info("Captured failure on concurrent update:", throwable);
    }

    assertThat(failures.size(), equalTo(0));

    // Upsert all the ids one last time to make sure they are available at get time
    // This means that we add 1 to the expected versions and attempts
    // All the previous operations should be complete or failed at this point
    for (int i = 0; i < numberOfIdsPerThread; ++i) {
      UpdateResponse ur =
          client()
              .prepareUpdate("test", "type1", Integer.toString(i))
              .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
              .setRetryOnConflict(Integer.MAX_VALUE)
              .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
              .execute()
              .actionGet();
    }

    refresh();

    for (int i = 0; i < numberOfIdsPerThread; ++i) {
      int totalFailures = 0;
      GetResponse response =
          client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
      if (response.isExists()) {
        assertThat(response.getId(), equalTo(Integer.toString(i)));
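        // Each of the numberOfThreads threads issued numberOfUpdatesPerId updates and
        // numberOfUpdatesPerId deletes for this id (every successful request bumps the version),
        // plus the single upsert above; failed requests are subtracted from the expected version.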
        int expectedVersion = (numberOfThreads * numberOfUpdatesPerId * 2) + 1;
        for (UpdateThread ut : threads) {
          if (ut.failedMap.containsKey(i)) {
            totalFailures += ut.failedMap.get(i);
          }
        }
        expectedVersion -= totalFailures;
        logger.error(
            "Actual version [{}] Expected version [{}] Total failures [{}]",
            response.getVersion(),
            expectedVersion,
            totalFailures);
        assertThat(response.getVersion(), equalTo((long) expectedVersion));
        assertThat(
            response.getVersion() + totalFailures,
            equalTo((long) ((numberOfUpdatesPerId * numberOfThreads * 2) + 1)));
      }
    }
  }