public ArrayList<Long> intersection(ArrayList<Long> first, ArrayList<Long> second) {
    // Collect each id of "second" that also occurs in "first", in the order the
    // ids appear in "second" (a simple O(n * m) nested scan).
    ArrayList<Long> result = new ArrayList<Long>();
    for (Long id : second) {
      for (Long idInFirst : first) {
        if (idInFirst.equals(id)) {
          result.add(idInFirst);
          break;
        }
      }
    }
    return result;
  }
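For longer id lists, a hash-based variant avoids the quadratic nested scan. This is a minimal sketch, not part of the original snippet; it assumes a java.util.HashSet import and returns the same values, in the same order, as intersection above.

public ArrayList<Long> intersectionFast(ArrayList<Long> first, ArrayList<Long> second) {
    // Build a set of the ids in "first" once, then probe it for each id in "second":
    // O(n + m) instead of O(n * m).
    HashSet<Long> firstIds = new HashSet<Long>(first);
    ArrayList<Long> result = new ArrayList<Long>();
    for (Long id : second) {
      if (firstIds.contains(id)) {
        result.add(id);
      }
    }
    return result;
  }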
   /** Indexes one randomly generated document with a random number of numeric and text fields. */
   private void indexDoc() throws Exception {
    StringBuilder sb = new StringBuilder();
    XContentBuilder json =
        XContentFactory.jsonBuilder()
            .startObject()
            .field("field", "value" + ThreadLocalRandom.current().nextInt());

     int fields = ThreadLocalRandom.current().nextInt(numberOfFields); // bounded nextInt avoids the Math.abs(Integer.MIN_VALUE) pitfall
    for (int i = 0; i < fields; i++) {
      json.field("num_" + i, ThreadLocalRandom.current().nextDouble());
       int tokens = ThreadLocalRandom.current().nextInt(textTokens); // non-negative token count; the unbounded form could go negative and skip the loop
      sb.setLength(0);
      for (int j = 0; j < tokens; j++) {
        sb.append(UUID.randomBase64UUID()).append(' ');
      }
      json.field("text_" + i, sb.toString());
    }

    json.endObject();

    String id = Long.toString(idCounter.incrementAndGet());
    client
        .client()
        .prepareIndex("test", "type1", id)
        .setCreate(true)
        .setSource(json)
        .execute()
        .actionGet();
    indexCounter.incrementAndGet();
  }
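A sketch of how indexDoc() might be driven concurrently, which its use of ThreadLocalRandom and the atomic counters suggests; the pool size, iteration count, and the runIndexers name are assumptions, and the usual java.util.concurrent imports are assumed to be present.

   // Hypothetical driver, not part of the original snippet: four worker threads each index 10,000 docs.
   private void runIndexers() throws InterruptedException {
     ExecutorService pool = Executors.newFixedThreadPool(4);
     for (int t = 0; t < 4; t++) {
       pool.submit(
           () -> {
             for (int i = 0; i < 10_000; i++) {
               try {
                 indexDoc();
               } catch (Exception e) {
                 throw new RuntimeException(e);
               }
             }
           });
     }
     pool.shutdown();
     pool.awaitTermination(10, TimeUnit.MINUTES);
   }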
 /** DELETE /organizers/:id -> delete the "id" organizer. */
 @RequestMapping(
     value = "/organizers/{id}",
     method = RequestMethod.DELETE,
     produces = MediaType.APPLICATION_JSON_VALUE)
 @Timed
 public ResponseEntity<Void> deleteOrganizer(@PathVariable Long id) {
   log.debug("REST request to delete Organizer : {}", id);
   organizerService.delete(id);
   return ResponseEntity.ok()
       .headers(HeaderUtil.createEntityDeletionAlert("organizer", id.toString()))
       .build();
 }
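A minimal sketch of exercising this endpoint with Spring's MockMvc; the restOrganizerMockMvc instance and the mocked organizerService are assumptions about the surrounding test class (with the usual MockMvcRequestBuilders, MockMvcResultMatchers, and Mockito static imports), not something shown in the original resource.

  @Test
  public void deleteOrganizerReturnsOk() throws Exception {
    // Hypothetical test: performs DELETE /organizers/1 and expects the 200 response built above.
    restOrganizerMockMvc
        .perform(delete("/organizers/{id}", 1L).accept(MediaType.APPLICATION_JSON))
        .andExpect(status().isOk());
    verify(organizerService).delete(1L);
  }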
 /** DELETE /fases/:id -> delete the "id" fase. */
 @RequestMapping(
     value = "/fases/{id}",
     method = RequestMethod.DELETE,
     produces = MediaType.APPLICATION_JSON_VALUE)
 @Timed
 public ResponseEntity<Void> delete(@PathVariable Long id) {
   log.debug("REST request to delete Fase : {}", id);
   faseRepository.delete(id);
   faseSearchRepository.delete(id);
   return ResponseEntity.ok()
       .headers(HeaderUtil.createEntityDeletionAlert("fase", id.toString()))
       .build();
 }
  private void testLoad(boolean fullRecovery) {
    startNode("server1");

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.GREEN));

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("--> creating test index ...");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("--> refreshing and checking count");
    client("server1").admin().indices().prepareRefresh().execute().actionGet();
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().count(),
         equalTo(0L));

    logger.info("--> indexing 1234 docs");
    for (long i = 0; i < 1234; i++) {
      client("server1")
          .prepareIndex("test", "type1", Long.toString(i))
          .setCreate(
              true) // make sure we use create, so if we recover wrongly, we will get increments...
          .setSource(MapBuilder.<String, Object>newMapBuilder().put("test", "value" + i).map())
          .execute()
          .actionGet();

      // snapshot every 11 docs so we get some actions going on in the gateway
      if ((i % 11) == 0) {
        client("server1").admin().indices().prepareGatewaySnapshot().execute().actionGet();
      }
      // flush every once in a while, so we get different data
      if ((i % 55) == 0) {
        client("server1").admin().indices().prepareFlush().execute().actionGet();
      }
    }

    logger.info("--> refreshing and checking count");
    client("server1").admin().indices().prepareRefresh().execute().actionGet();
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().count(),
         equalTo(1234L));

    logger.info("--> closing the server");
    closeNode("server1");
    if (fullRecovery) {
      logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
      FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
      logger.info(
          "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    }

    startNode("server1");

    logger.info("--> running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("--> done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("--> checking count");
    assertThat(
        client("server1").prepareCount().setQuery(matchAllQuery()).execute().actionGet().count(),
         equalTo(1234L));

    logger.info("--> checking reuse / recovery status");
    IndicesStatusResponse statusResponse =
        client("server1").admin().indices().prepareStatus().setRecovery(true).execute().actionGet();
    for (IndexShardStatus indexShardStatus : statusResponse.index("test")) {
      for (ShardStatus shardStatus : indexShardStatus) {
        if (shardStatus.shardRouting().primary()) {
          if (fullRecovery || !isPersistentStorage()) {
            assertThat(shardStatus.gatewayRecoveryStatus().reusedIndexSize().bytes(), equalTo(0L));
          } else {
            assertThat(
                shardStatus.gatewayRecoveryStatus().reusedIndexSize().bytes(),
                greaterThan(
                    shardStatus.gatewayRecoveryStatus().indexSize().bytes()
                        - 8196 /* segments file and others */));
          }
        }
      }
    }
  }