Example #1
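Bulk-indexes three active_rule documents, deletes the active rules of profile 1, and asserts that rules 25 and 2702 are gone while rule 523 remains.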
  @Test
  public void delete_active_rules_from_profile() throws Exception {
    esSetup
        .client()
        .prepareBulk()
        // On profile 1
        .add(
            Requests.indexRequest()
                .index("rules")
                .type("active_rule")
                .parent("1")
                .source(testFileAsString("delete_from_profile/active_rule25.json")))
        .add(
            Requests.indexRequest()
                .index("rules")
                .type("active_rule")
                .parent("3")
                .source(testFileAsString("delete_from_profile/active_rule2702.json")))
        // On profile 2
        .add(
            Requests.indexRequest()
                .index("rules")
                .type("active_rule")
                .parent("2")
                .source(testFileAsString("delete_from_profile/active_rule523.json")))
        .setRefresh(true)
        .execute()
        .actionGet();

    esActiveRule.deleteActiveRulesFromProfile(1);
    assertThat(esSetup.exists("rules", "active_rule", "25")).isFalse();
    assertThat(esSetup.exists("rules", "active_rule", "2702")).isFalse();
    assertThat(esSetup.exists("rules", "active_rule", "523")).isTrue();
  }
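Example #2
An indexer thread that rendezvouses on two barriers, indexes documents with ids taken from a shared generator until it reaches max, and counts down a latch when done.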
 @Override
 public void run() {
   try {
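      // two-phase start: the first barrier signals this thread is ready, the second
      // releases all indexers at once (assumed coordination with the test driver)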
     barrier1.await();
     barrier2.await();
     for (; counter < max; counter++) {
       Client client = client(counter);
       long id = idGenerator.incrementAndGet();
       client
           .index(
               Requests.indexRequest()
                   .index("test")
                   .type("type1")
                   .id(Long.toString(id))
                   .source(
                       XContentFactory.jsonBuilder()
                           .startObject()
                           .field("num", id % fieldNumLimit)
                           .endObject()))
           .actionGet();
     }
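      // note: the loop's local id is out of scope here, so this presumably refers to
      // an id field of the enclosing indexer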
     System.out.println("Indexer [" + id + "]: Done");
   } catch (Exception e) {
     System.err.println("Failed to index:");
     e.printStackTrace();
   } finally {
     latch.countDown();
   }
 }
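Example #3
A producer loop that feeds create-only index requests into a BulkProcessor and forces a flush on each round before delaying.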
    @Override
    public void run() {
      while (!closed) {
        try {
          for (int i = 0; i < simpleNumber; i++) {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();

            builder.field(fieldName, i);
            builder.endObject();
            bulkProcessor.add(
                Requests.indexRequest(indexName)
                    .type(typeName)
                    .id(UUID.randomUUID().toString())
                    .create(true)
                    .source(builder));
          }
          // in this case we force the bulking, but it should seldom be done
          bulkProcessor.flush();
          delay();
        } catch (Exception e) {
          logger.error(e.getMessage(), e, (Object) null);
          closed = true;
        }
        if (closed) {
          return;
        }
      }
    }
Example #4
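A Flume Elasticsearch sink test: with a batch size of 2, draining five events should take exactly three process() calls.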
  @Test
  public void shouldIndexFiveEventsOverThreeBatches() throws Exception {
    parameters.put(BATCH_SIZE, "2");
    Configurables.configure(fixture, new Context(parameters));
    Channel channel = bindAndStartChannel(fixture);

    int numberOfEvents = 5;
    Event[] events = new Event[numberOfEvents];

    Transaction tx = channel.getTransaction();
    tx.begin();
    for (int i = 0; i < numberOfEvents; i++) {
      String body = "event #" + i + " of " + numberOfEvents;
      Event event = EventBuilder.withBody(body.getBytes());
      events[i] = event;
      channel.put(event);
    }
    tx.commit();
    tx.close();

    int count = 0;
    Status status = Status.READY;
    while (status != Status.BACKOFF) {
      count++;
      status = fixture.process();
    }
    fixture.stop();

    assertEquals(3, count);

    client.admin().indices().refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();
    assertMatchAllQuery(numberOfEvents, events);
    assertBodyQuery(5, events);
  }
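Example #5
A variant of the producer loop above that fills a BulkRequestBuilder directly and executes any remaining actions at the end of each round.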
    @Override
    public void run() {
      while (!closed) {
        try {
          for (int i = 0; i < simpleNumber; i++) {
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();

            builder.field(fieldName, i);
            builder.endObject();
            currentRequest.add(
                Requests.indexRequest(indexName)
                    .type(typeName)
                    .id(UUID.randomUUID().toString())
                    .create(true)
                    .source(builder));
            processBulkIfNeeded();
          }
          if (currentRequest.numberOfActions() > 0) {
            currentRequest.execute().get();
            currentRequest = client.prepareBulk();
          }
          delay();
        } catch (Exception e) {
          logger.error(e.getMessage(), e, (Object) null);
          closed = true;
        }
        if (closed) {
          return;
        }
      }
    }
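Example #6
An interactive handler that rejects requests missing the index, type, or id parameter, then builds an IndexRequest from the request's "data" map.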
 @Override
 public void handleRequest(final InteractiveRequest request, final InteractiveChannel channel) {
   String index = request.paramAsString("index");
   String type = request.paramAsString("type");
   String id = request.paramAsString("id");
   try {
     if (index == null) {
       channel.sendResponse(TYPE, new IllegalArgumentException("index is null"));
       return;
     }
     if (type == null) {
       channel.sendResponse(TYPE, new IllegalArgumentException("type is null"));
       return;
     }
     if (id == null) {
       channel.sendResponse(TYPE, new IllegalArgumentException("id is null"));
       return;
     }
     IndexRequest indexRequest =
         Requests.indexRequest(index)
             .type(type)
             .id(id)
             .source((Map<String, Object>) request.asMap().get("data"));
     add(indexRequest, channel);
   } catch (IOException ex) {
     try {
       channel.sendResponse(TYPE, ex);
     } catch (IOException ex1) {
       logger.error("error while sending exception");
     }
   }
 }
Example #7
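The single-batch variant of the Flume sink test: with a batch size of 5, one process() call indexes all five events.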
  @Test
  public void shouldIndexFiveEvents() throws Exception {
    // Make it so we only need to call process once
    parameters.put(BATCH_SIZE, "5");
    Configurables.configure(fixture, new Context(parameters));
    Channel channel = bindAndStartChannel(fixture);

    int numberOfEvents = 5;
    Event[] events = new Event[numberOfEvents];

    Transaction tx = channel.getTransaction();
    tx.begin();
    for (int i = 0; i < numberOfEvents; i++) {
      String body = "event #" + i + " of " + numberOfEvents;
      Event event = EventBuilder.withBody(body.getBytes());
      events[i] = event;
      channel.put(event);
    }
    tx.commit();
    tx.close();

    fixture.process();
    fixture.stop();
    client.admin().indices().refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();

    assertMatchAllQuery(numberOfEvents, events);
    assertBodyQuery(5, events);
  }
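Example #8
Verifies that useful REST headers and the REST context are copied onto cluster admin requests (health, state, stats) on top of the existing transport headers.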
  @Test
  public void testCopyHeadersClusterAdminRequest() {
    Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
    Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
    usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
    Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
    Map<String, String> transportContext =
        Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();

    HashMap<String, String> expectedHeaders = new HashMap<>();
    expectedHeaders.putAll(transportHeaders);
    expectedHeaders.putAll(copiedHeaders);

    Map<String, String> expectedContext = new HashMap<>();
    expectedContext.putAll(transportContext);
    expectedContext.putAll(restContext);

    Client client =
        client(
            new NoOpClient(), new FakeRestRequest(restHeaders, expectedContext), usefulRestHeaders);

    ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest();
    putHeaders(clusterHealthRequest, transportHeaders);
    putContext(clusterHealthRequest, transportContext);
    assertHeaders(clusterHealthRequest, transportHeaders);
    client.admin().cluster().health(clusterHealthRequest);
    assertHeaders(clusterHealthRequest, expectedHeaders);
    assertContext(clusterHealthRequest, expectedContext);

    ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest();
    putHeaders(clusterStateRequest, transportHeaders);
    putContext(clusterStateRequest, transportContext);
    assertHeaders(clusterStateRequest, transportHeaders);
    client.admin().cluster().state(clusterStateRequest);
    assertHeaders(clusterStateRequest, expectedHeaders);
    assertContext(clusterStateRequest, expectedContext);

    ClusterStatsRequest clusterStatsRequest = Requests.clusterStatsRequest();
    putHeaders(clusterStatsRequest, transportHeaders);
    putContext(clusterStatsRequest, transportContext);
    assertHeaders(clusterStatsRequest, transportHeaders);
    client.admin().cluster().clusterStats(clusterStatsRequest);
    assertHeaders(clusterStatsRequest, expectedHeaders);
    assertContext(clusterStatsRequest, expectedContext);
  }
Example #9
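Builds the IndexRequest for an upsert item: it either takes the raw source as-is or serializes the insert values, and re-evaluates generated column expressions when any are missing or need validation.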
  private IndexRequest prepareInsert(
      DocTableInfo tableInfo, ShardUpsertRequest request, ShardUpsertRequest.Item item)
      throws IOException {
    List<GeneratedReferenceInfo> generatedReferencesWithValue = new ArrayList<>();
    BytesReference source;
    if (request.isRawSourceInsert()) {
      assert item.insertValues().length > 0 : "empty insert values array";
      source = new BytesArray((BytesRef) item.insertValues()[0]);
    } else {
      XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
      for (int i = 0; i < item.insertValues().length; i++) {
        Reference ref = request.insertColumns()[i];
        if (ref.info().granularity() == RowGranularity.DOC) {
          // don't include values for partitions in the _source
          // ideally columns with partition granularity shouldn't be part of the request
          builder.field(ref.ident().columnIdent().fqn(), item.insertValues()[i]);
          if (ref.info() instanceof GeneratedReferenceInfo) {
            generatedReferencesWithValue.add((GeneratedReferenceInfo) ref.info());
          }
        }
      }
      source = builder.bytes();
    }

    int generatedColumnSize = 0;
    for (GeneratedReferenceInfo generatedReferenceInfo : tableInfo.generatedColumns()) {
      if (!tableInfo.partitionedByColumns().contains(generatedReferenceInfo)) {
        generatedColumnSize++;
      }
    }

    int numMissingGeneratedColumns = generatedColumnSize - generatedReferencesWithValue.size();
    if (numMissingGeneratedColumns > 0
        || (generatedReferencesWithValue.size() > 0 && request.validateGeneratedColumns())) {
      // we need to evaluate some generated column expressions
      Map<String, Object> sourceMap =
          processGeneratedColumnsOnInsert(
              tableInfo,
              request.insertColumns(),
              item.insertValues(),
              request.isRawSourceInsert(),
              request.validateGeneratedColumns());
      source = XContentFactory.jsonBuilder().map(sourceMap).bytes();
    }

    IndexRequest indexRequest =
        Requests.indexRequest(request.index())
            .type(request.type())
            .id(item.id())
            .routing(request.routing())
            .source(source)
            .create(!request.overwriteDuplicates())
            .operationThreaded(false);
    if (logger.isTraceEnabled()) {
      logger.trace(
          "Inserting document with id {}, source: {}", item.id(), indexRequest.source().toUtf8());
    }
    return indexRequest;
  }
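Example #10
The same header-copying check as above, applied to indices admin requests (create index, close index, flush).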
  @Test
  public void testCopyHeadersIndicesAdminRequest() {
    Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
    Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
    usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
    Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
    Map<String, String> transportContext =
        Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();

    HashMap<String, String> expectedHeaders = new HashMap<>();
    expectedHeaders.putAll(transportHeaders);
    expectedHeaders.putAll(copiedHeaders);

    Map<String, String> expectedContext = new HashMap<>();
    expectedContext.putAll(transportContext);
    expectedContext.putAll(restContext);

    Client client =
        client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders);

    CreateIndexRequest createIndexRequest = Requests.createIndexRequest("test");
    putHeaders(createIndexRequest, transportHeaders);
    putContext(createIndexRequest, transportContext);
    assertHeaders(createIndexRequest, transportHeaders);
    client.admin().indices().create(createIndexRequest);
    assertHeaders(createIndexRequest, expectedHeaders);
    assertContext(createIndexRequest, expectedContext);

    CloseIndexRequest closeIndexRequest = Requests.closeIndexRequest("test");
    putHeaders(closeIndexRequest, transportHeaders);
    putContext(closeIndexRequest, transportContext);
    assertHeaders(closeIndexRequest, transportHeaders);
    client.admin().indices().close(closeIndexRequest);
    assertHeaders(closeIndexRequest, expectedHeaders);
    assertContext(closeIndexRequest, expectedContext);

    FlushRequest flushRequest = Requests.flushRequest();
    putHeaders(flushRequest, transportHeaders);
    putContext(flushRequest, transportContext);
    assertHeaders(flushRequest, transportHeaders);
    client.admin().indices().flush(flushRequest);
    assertHeaders(flushRequest, expectedHeaders);
    assertContext(flushRequest, expectedContext);
  }
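Example #11
The same header-copying check applied to core client requests: search, get, and index.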
  @Test
  public void testCopyHeadersRequest() {
    Map<String, String> transportHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> restHeaders = randomHeaders(randomIntBetween(0, 10));
    Map<String, String> copiedHeaders = randomHeadersFrom(restHeaders);
    Set<String> usefulRestHeaders = new HashSet<>(copiedHeaders.keySet());
    usefulRestHeaders.addAll(randomMap(randomIntBetween(0, 10), "useful-").keySet());
    Map<String, String> restContext = randomContext(randomIntBetween(0, 10));
    Map<String, String> transportContext =
        Maps.difference(randomContext(randomIntBetween(0, 10)), restContext).entriesOnlyOnLeft();

    Map<String, String> expectedHeaders = new HashMap<>();
    expectedHeaders.putAll(transportHeaders);
    expectedHeaders.putAll(copiedHeaders);

    Map<String, String> expectedContext = new HashMap<>();
    expectedContext.putAll(transportContext);
    expectedContext.putAll(restContext);

    Client client =
        client(new NoOpClient(), new FakeRestRequest(restHeaders, restContext), usefulRestHeaders);

    SearchRequest searchRequest = Requests.searchRequest();
    putHeaders(searchRequest, transportHeaders);
    putContext(searchRequest, transportContext);
    assertHeaders(searchRequest, transportHeaders);
    client.search(searchRequest);
    assertHeaders(searchRequest, expectedHeaders);
    assertContext(searchRequest, expectedContext);

    GetRequest getRequest = Requests.getRequest("index");
    putHeaders(getRequest, transportHeaders);
    putContext(getRequest, transportContext);
    assertHeaders(getRequest, transportHeaders);
    client.get(getRequest);
    assertHeaders(getRequest, expectedHeaders);
    assertContext(getRequest, expectedContext);

    IndexRequest indexRequest = Requests.indexRequest();
    putHeaders(indexRequest, transportHeaders);
    putContext(indexRequest, transportContext);
    assertHeaders(indexRequest, transportHeaders);
    client.index(indexRequest);
    assertHeaders(indexRequest, expectedHeaders);
    assertContext(indexRequest, expectedContext);
  }
Example #12
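A test helper that blocks until cluster health reports the expected number of nodes and fails if the request times out.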
 private void waitForNodes(int numNodes) {
   ClusterHealthResponse actionGet =
       client()
           .admin()
           .cluster()
           .health(Requests.clusterHealthRequest().waitForNodes(Integer.toString(numNodes)))
           .actionGet();
   assertThat(actionGet.isTimedOut(), is(false));
 }
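Example #13
Creates an index through the admin client and reports whether the request was acknowledged.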
 @Override
 public boolean createIndex(String indexName) {
   Assert.notNull(indexName, "No index defined for Query");
   return client
       .admin()
       .indices()
       .create(Requests.createIndexRequest(indexName))
       .actionGet()
       .isAcknowledged();
 }
Example #14
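Shows that BulkRequest.add(Iterable) preserves the order and the concrete types of the added index, update, and delete requests.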
 public void testBulkAddIterable() {
   BulkRequest bulkRequest = Requests.bulkRequest();
   List<ActionRequest<?>> requests = new ArrayList<>();
   requests.add(new IndexRequest("test", "test", "id").source("field", "value"));
   requests.add(new UpdateRequest("test", "test", "id").doc("field", "value"));
   requests.add(new DeleteRequest("test", "test", "id"));
   bulkRequest.add(requests);
   assertThat(bulkRequest.requests().size(), equalTo(3));
   assertThat(bulkRequest.requests().get(0), instanceOf(IndexRequest.class));
   assertThat(bulkRequest.requests().get(1), instanceOf(UpdateRequest.class));
   assertThat(bulkRequest.requests().get(2), instanceOf(DeleteRequest.class));
 }
Example #15
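Bulk-indexes two active_rule documents and then deletes them by their integer ids.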
 @Test
 public void should_delete_from_integer_ids() throws Exception {
   esSetup
       .client()
       .prepareBulk()
       .add(
           Requests.indexRequest()
               .index("rules")
               .type("active_rule")
               .parent("1")
               .source(testFileAsString("delete_from_profile/active_rule25.json")))
       .add(
           Requests.indexRequest()
               .index("rules")
               .type("active_rule")
               .parent("3")
               .source(testFileAsString("delete_from_profile/active_rule2702.json")))
       .setRefresh(true)
       .execute()
       .actionGet();
   esActiveRule.deleteActiveRules(ImmutableList.of(25, 2702));
 }
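Example #16
A REST handler for cluster settings updates: it parses the request body and applies the "transient" and "persistent" sections.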
  @Override
  public void handleRequest(final RestRequest request, final RestChannel channel) {
    final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest =
        Requests.clusterUpdateSettingsRequest();
    clusterUpdateSettingsRequest.listenerThreaded(false);
    try {
      Map<String, Object> source =
          XContentFactory.xContent(request.content()).createParser(request.content()).mapAndClose();
      if (source.containsKey("transient")) {
        clusterUpdateSettingsRequest.transientSettings((Map) source.get("transient"));
      }
      if (source.containsKey("persistent")) {
        clusterUpdateSettingsRequest.persistentSettings((Map) source.get("persistent"));
      }
    } catch (Exception e) {
      try {
        channel.sendResponse(new XContentThrowableRestResponse(request, e));
      } catch (IOException e1) {
        logger.warn("Failed to send response", e1);
      }
      return;
    }

    client
        .admin()
        .cluster()
        .updateSettings(
            clusterUpdateSettingsRequest,
            new ActionListener<ClusterUpdateSettingsResponse>() {
              @Override
              public void onResponse(ClusterUpdateSettingsResponse response) {
                try {
                  channel.sendResponse(new StringRestResponse(RestStatus.OK));
                } catch (Throwable e) {
                  onFailure(e);
                }
              }

              @Override
              public void onFailure(Throwable e) {
                if (logger.isDebugEnabled()) {
                  logger.debug("failed to handle cluster state", e);
                }
                try {
                  channel.sendResponse(new XContentThrowableRestResponse(request, e));
                } catch (IOException e1) {
                  logger.error("Failed to send failure response", e1);
                }
              }
            });
  }
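Example #17
A REST handler that renders index templates from a cluster state response filtered down to template metadata.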
  @Override
  public void handleRequest(final RestRequest request, final RestChannel channel) {
    ClusterStateRequest clusterStateRequest =
        Requests.clusterStateRequest()
            .filterRoutingTable(true)
            .filterNodes(true)
            .filteredIndexTemplates(request.param("name"))
            .filterOutIndices();

    clusterStateRequest.listenerThreaded(false);

    client
        .admin()
        .cluster()
        .state(
            clusterStateRequest,
            new ActionListener<ClusterStateResponse>() {
              @Override
              public void onResponse(ClusterStateResponse response) {
                Map<String, String> paramsMap = Maps.newHashMap();
                paramsMap.put("reduce_mappings", "true");
                ToXContent.Params params = new ToXContent.DelegatingMapParams(paramsMap, request);

                try {
                  MetaData metaData = response.getState().metaData();
                  XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
                  builder.startObject();

                  for (IndexTemplateMetaData indexMetaData : metaData.templates().values()) {
                    IndexTemplateMetaData.Builder.toXContent(indexMetaData, builder, params);
                  }

                  builder.endObject();

                  channel.sendResponse(new XContentRestResponse(request, OK, builder));
                } catch (Throwable e) {
                  onFailure(e);
                }
              }

              @Override
              public void onFailure(Throwable e) {
                try {
                  channel.sendResponse(new XContentThrowableRestResponse(request, e));
                } catch (IOException e1) {
                  logger.error("Failed to send failure response", e1);
                }
              }
            });
  }
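Example #18
Reads the alias names defined for an index out of the cluster state metadata.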
 @Override
 public Set<String> queryForAlias(String indexName) {
   ClusterStateRequest clusterStateRequest =
       Requests.clusterStateRequest().routingTable(true).nodes(true).indices(indexName);
   Iterator<String> iterator =
       client
           .admin()
           .cluster()
           .state(clusterStateRequest)
           .actionGet()
           .getState()
           .getMetaData()
           .aliases()
           .keysIt();
   return newHashSet(iterator);
 }
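Example #19
A REST handler for the search-shards API that copies local, types, routing, preference, and indices options from the request before dispatching it.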
  @Override
  public void handleRequest(final RestRequest request, final RestChannel channel) {
    String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
    final ClusterSearchShardsRequest clusterSearchShardsRequest =
        Requests.clusterSearchShardsRequest(indices);
    clusterSearchShardsRequest.local(
        request.paramAsBoolean("local", clusterSearchShardsRequest.local()));
    clusterSearchShardsRequest.listenerThreaded(false);

    clusterSearchShardsRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
    clusterSearchShardsRequest.routing(request.param("routing"));
    clusterSearchShardsRequest.preference(request.param("preference"));
    clusterSearchShardsRequest.indicesOptions(
        IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions()));

    client
        .admin()
        .cluster()
        .searchShards(
            clusterSearchShardsRequest,
            new ActionListener<ClusterSearchShardsResponse>() {
              @Override
              public void onResponse(ClusterSearchShardsResponse response) {
                try {
                  XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
                  builder.startObject();
                  response.toXContent(builder, request);
                  builder.endObject();
                  channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
                } catch (Throwable e) {
                  onFailure(e);
                }
              }

              @Override
              public void onFailure(Throwable e) {
                try {
                  channel.sendResponse(new BytesRestResponse(request, e));
                } catch (IOException e1) {
                  logger.error("Failed to send failure response", e1);
                }
              }
            });
  }
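Example #20
Creates the article index and puts a mapping that analyzes the title and summary fields with ik, skipping both steps if the index already exists.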
  private void initArticleIndexMapping() {
    try {
      boolean isExists = ElasticsearchHelper.isExistsIndex(transportClient, INDICE);
      if (!isExists) {
        // create index
        transportClient.admin().indices().prepareCreate(INDICE).execute().actionGet();
        // create the mapping
        XContentBuilder mapping =
            XContentFactory.jsonBuilder()
                .startObject()
                .startObject(TYPE)
                .startObject("_all")
                .field("indexAnalyzer", "ik")
                .field("searchAnalyzer", "ik")
                .endObject()
                .startObject("properties")
                .startObject(TITLE)
                .field("type", "string")
                .field("indexAnalyzer", "ik")
                .field("searchAnalyzer", "ik")
                .endObject()
                .startObject(SUMMARY)
                .field("type", "string")
                .field("indexAnalyzer", "ik")
                .field("searchAnalyzer", "ik")
                .endObject()
                .endObject()
                .endObject()
                .endObject();

        PutMappingRequest mappingRequest =
            Requests.putMappingRequest(INDICE).type(TYPE).source(mapping);
        transportClient.admin().indices().putMapping(mappingRequest).actionGet();

        logger.debug("create index and mapping are success!");
      } else {
        logger.debug("Index already exists!");
      }

    } catch (Exception e) {
      logger.error("create index and mapping are failure!", e);
    }
  }
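Example #21
A REST handler that returns the persistent and transient cluster settings from a slimmed-down cluster state request.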
  @Override
  public void handleRequest(final RestRequest request, final RestChannel channel) {
    ClusterStateRequest clusterStateRequest =
        Requests.clusterStateRequest().listenerThreaded(false).routingTable(false).nodes(false);
    clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
    client
        .admin()
        .cluster()
        .state(
            clusterStateRequest,
            new ActionListener<ClusterStateResponse>() {
              @Override
              public void onResponse(ClusterStateResponse response) {
                try {
                  XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
                  builder.startObject();

                  builder.startObject("persistent");
                  response.getState().metaData().persistentSettings().toXContent(builder, request);
                  builder.endObject();

                  builder.startObject("transient");
                  response.getState().metaData().transientSettings().toXContent(builder, request);
                  builder.endObject();

                  builder.endObject();

                  channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
                } catch (Throwable e) {
                  onFailure(e);
                }
              }

              @Override
              public void onFailure(Throwable e) {
                try {
                  channel.sendResponse(new XContentThrowableRestResponse(request, e));
                } catch (IOException e1) {
                  logger.error("Failed to send failure response", e1);
                }
              }
            });
  }
Example #22
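Indexes a single event through the Flume sink and verifies it with match-all and body queries.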
  @Test
  public void shouldIndexOneEvent() throws Exception {
    Configurables.configure(fixture, new Context(parameters));
    Channel channel = bindAndStartChannel(fixture);

    Transaction tx = channel.getTransaction();
    tx.begin();
    Event event = EventBuilder.withBody("event #1 or 1".getBytes());
    channel.put(event);
    tx.commit();
    tx.close();

    fixture.process();
    fixture.stop();
    client.admin().indices().refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();

    assertMatchAllQuery(1, event);
    assertBodyQuery(1, event);
  }
Example #23
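Bulk-updates a collection of documents, optionally with versioning, and returns the item ids of the operations that failed.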
  /**
   * Updates the specified objects.
   *
   * @return the ids of the objects that failed (e.g. due to versioning)
   */
  public Collection<Integer> bulkUpdate(
      Collection<T> objects, String indexName, boolean refresh, boolean enableVersioning) {
    // now using the bulk API instead of feeding each doc separately with feedDoc
    BulkRequestBuilder brb = client.prepareBulk();
    // this works differently than the direct call to refresh!? maybe refresh is not async?
    //        brb.setRefresh(refresh);
    for (T o : objects) {
      if (o.getId() == null) {
        logger.warn("Skipped object without id when bulkUpdate:" + o);
        continue;
      }

      try {
        XContentBuilder source = createDoc(o);
        IndexRequest indexReq =
            Requests.indexRequest(indexName).type(getIndexType()).id(o.getId()).source(source);

        if (enableVersioning) indexReq.version(o.getVersion());

        brb.add(indexReq);
      } catch (IOException ex) {
        logger.warn("Cannot add object:" + o + " to bulkIndexing action." + ex.getMessage());
      }
    }
    if (brb.numberOfActions() > 0) {
      BulkResponse rsp = brb.execute().actionGet();
      if (rsp.hasFailures()) {
        List<Integer> list = new ArrayList<Integer>(rsp.items().length);
        for (BulkItemResponse br : rsp.items()) {
          if (br.isFailed()) {
            //                        logger.info("Error:" + br.failureMessage());
            list.add(br.itemId());
          }
        }
        return list;
      }
      if (refresh) refresh(indexName);
    }

    return Collections.emptyList();
  }
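Example #24
Starts an embedded node inside a Karaf test, gives the cluster time to come up, and asserts GREEN health before stopping the node.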
  @Test
  public void testNode() throws Exception {

    System.setProperty("karaf.home", "target/karaf");
    System.setProperty("karaf.name", "decanter-test");

    Dictionary<String, String> configuration = new Hashtable<>();
    configuration.put(EmbeddedNode.PATH_DATA, "target/karaf/es");
    EmbeddedNode embeddedNode = new EmbeddedNode();
    embeddedNode.start(configuration);
    Node node = embeddedNode.getNode();

    // give the cluster time to start up
    Thread.sleep(5000);

    ClusterHealthResponse healthResponse =
        node.client().admin().cluster().health(Requests.clusterHealthRequest()).actionGet();
    assertEquals(ClusterHealthStatus.GREEN, healthResponse.getStatus());

    embeddedNode.stop();
  }
Example #25
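Updates the twitter/tweet mapping so the text field is stored and analyzed with ik_max_word.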
 public void updateMapping() {
   try {
     XContentBuilder builder =
         XContentFactory.jsonBuilder()
             .startObject()
             .startObject("tweet")
             .startObject("properties")
             .startObject("text")
             .field("type", "string")
             .field("store", "yes")
             .field("analyzer", "ik_max_word")
             .field("search_analyzer", "ik_max_word")
             .endObject()
             .endObject()
             .endObject()
             .endObject();
     PutMappingRequest mapping =
         Requests.putMappingRequest("twitter").type("tweet").source(builder);
     ESAction.getInstance().client.admin().indices().putMapping(mapping).actionGet();
   } catch (IOException e) {
     e.printStackTrace();
   }
 }
Example #26
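Indexes an event whose body is JSON and asserts that the parsed fields come back from both a match-all and a field query.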
  @Test
  public void shouldIndexComplexJsonEvent() throws Exception {
    Configurables.configure(fixture, new Context(parameters));
    Channel channel = bindAndStartChannel(fixture);

    Transaction tx = channel.getTransaction();
    tx.begin();
    Event event = EventBuilder.withBody("{\"event\":\"json content\",\"num\":1}".getBytes());
    channel.put(event);
    tx.commit();
    tx.close();

    fixture.process();
    fixture.stop();
    client.admin().indices().refresh(Requests.refreshRequest(timestampedIndexName)).actionGet();

    Map<String, Object> expectedBody = new HashMap<String, Object>();
    expectedBody.put("event", "json content");
    expectedBody.put("num", 1);

    assertSearch(1, performSearch(QueryBuilders.matchAllQuery()), expectedBody, event);
    assertSearch(
        1, performSearch(QueryBuilders.fieldQuery("@message.event", "json")), expectedBody, event);
  }
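Example #27
A fault-injection test: with random IO exception rates set on the directory, it indexes and searches repeatedly, asserting hit counts only when all shards responded and the refresh succeeded.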
  @Test
  public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
    final int numShards = between(1, 5);
    String mapping =
        XContentFactory.jsonBuilder()
            .startObject()
            .startObject("type")
            .startObject("properties")
            .startObject("test")
            .field("type", "string")
            .field("index", "not_analyzed")
            .endObject()
            .endObject()
            .endObject()
            .endObject()
            .string();
    final double exceptionRate;
    final double exceptionOnOpenRate;
    if (frequently()) {
      if (randomBoolean()) {
        if (randomBoolean()) {
          exceptionOnOpenRate = 1.0 / between(5, 100);
          exceptionRate = 0.0d;
        } else {
          exceptionRate = 1.0 / between(5, 100);
          exceptionOnOpenRate = 0.0d;
        }
      } else {
        exceptionOnOpenRate = 1.0 / between(5, 100);
        exceptionRate = 1.0 / between(5, 100);
      }
    } else {
      // rarely no exception
      exceptionRate = 0d;
      exceptionOnOpenRate = 0d;
    }

    Builder settings =
        settingsBuilder()
            .put("index.number_of_shards", numShards)
            .put("index.number_of_replicas", randomIntBetween(0, 1))
            .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate)
            .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)
            .put(MockDirectoryHelper.CHECK_INDEX_ON_CLOSE, true);
    logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
    client()
        .admin()
        .indices()
        .prepareCreate("test")
        .setSettings(settings)
        .addMapping("type", mapping)
        .execute()
        .actionGet();
    ClusterHealthResponse clusterHealthResponse =
        client()
            .admin()
            .cluster()
            .health(
                Requests.clusterHealthRequest()
                    .waitForYellowStatus()
                    .timeout(TimeValue.timeValueSeconds(5)))
            .get(); // it's OK to timeout here
    final int numDocs;
    final boolean expectAllShardsFailed;
    if (clusterHealthResponse.isTimedOut()) {
      /* some seeds just won't let you create the index at all and we enter a ping-pong mode,
       * trying one node after another; that is ok, but we need to make sure we don't wait
       * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
       * when we search below. */
      logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
      numDocs = 1;
      expectAllShardsFailed = true;
    } else {
      numDocs = between(10, 100);
      expectAllShardsFailed = false;
    }
    long numCreated = 0;
    boolean[] added = new boolean[numDocs];
    for (int i = 0; i < numDocs; i++) {
      try {
        IndexResponse indexResponse =
            client()
                .prepareIndex("test", "type", "" + i)
                .setTimeout(TimeValue.timeValueSeconds(1))
                .setSource("test", English.intToEnglish(i))
                .get();
        if (indexResponse.isCreated()) {
          numCreated++;
          added[i] = true;
        }
      } catch (ElasticSearchException ex) {
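        // ignored: with random exception injection individual index requests may fail;
        // added[i] simply stays false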
      }
    }
    logger.info("Start Refresh");
    RefreshResponse refreshResponse =
        client()
            .admin()
            .indices()
            .prepareRefresh("test")
            .execute()
            .get(); // don't assert on failures here
    final boolean refreshFailed =
        refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info(
        "Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
        refreshFailed,
        refreshResponse.getFailedShards(),
        refreshResponse.getShardFailures().length,
        refreshResponse.getSuccessfulShards(),
        refreshResponse.getTotalShards());

    final int numSearches = atLeast(10);
    // we don't check anything here really just making sure we don't leave any open files or a
    // broken index behind.
    for (int i = 0; i < numSearches; i++) {
      try {
        int docToQuery = between(0, numDocs - 1);
        long expectedResults = added[docToQuery] ? 1 : 0;
        logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
        SearchResponse searchResponse =
            client()
                .prepareSearch()
                .setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
                .get();
        logger.info(
            "Successful shards: [{}]  numShards: [{}]",
            searchResponse.getSuccessfulShards(),
            numShards);
        if (searchResponse.getSuccessfulShards() == numShards && !refreshFailed) {
          assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(expectedResults));
        }
        // check match all
        searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get();
        if (searchResponse.getSuccessfulShards() == numShards && !refreshFailed) {
          assertThat(searchResponse.getHits().getTotalHits(), Matchers.equalTo(numCreated));
        }

      } catch (SearchPhaseExecutionException ex) {
        if (!expectAllShardsFailed) {
          throw ex;
        } else {
          logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
        }
      }
    }
  }
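Example #28
A gateway snapshot test that cycles a node through several restarts and verifies after each recovery (translog only, index plus translog, full recovery without the work dir, index only) that the mapping and the expected documents survive.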
  @Test
  @Slow
  public void testSnapshotOperations() throws Exception {
    startNode("server1", getClassDefaultSettings());

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to startup properly)");
    ClusterHealthResponse clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForGreenStatus())
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests

    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse =
        client("server1")
            .admin()
            .indices()
            .preparePutMapping("test")
            .setType("type1")
            .setSource(mappingSource())
            .execute()
            .actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    // verify that mapping is there
    ClusterStateResponse clusterState =
        client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    // create two and delete the first
    logger.info("Indexing #1");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    logger.info("Indexing #2");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // perform snapshot to the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exists");
    GetResponse getResponse =
        client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info(
        "Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
        .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info(
        "Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info(
        "Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info(
        "Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to startup)");
    clusterHealth =
        client("server1")
            .admin()
            .cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
            .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exists");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
  }
Example #29
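Corrupts a random file of a primary shard that has no replica, then asserts that the cluster turns RED and that the corrupted shard stays unassigned while the healthy shards keep running.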
  /**
   * Tests corruption that happens on a single shard when no replicas are present. We make sure
   * that the primary stays unassigned and that recovery of the healthy shards still happens.
   */
  public void testCorruptPrimaryNoReplica()
      throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);

    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                    .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
                    .put(
                        MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(),
                        false) // no checkindex - we corrupt shards on purpose
                    .put(
                        IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
                        // no translog-based flush - it might change the .liv / segments.N files
                        new ByteSizeValue(1, ByteSizeUnit.PB))));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
      builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(
        client()
            .admin()
            .indices()
            .prepareFlush()
            .setForce(true)
            .setWaitIfOngoing(true)
            .execute()
            .actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);

    ShardRouting shardRouting = corruptRandomPrimaryFile();
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();

    boolean didClusterTurnRed =
        awaitBusy(
            () -> {
              ClusterHealthStatus test =
                  client()
                      .admin()
                      .cluster()
                      .health(Requests.clusterHealthRequest("test"))
                      .actionGet()
                      .getStatus();
              return test == ClusterHealthStatus.RED;
            },
            5,
            // sometimes on slow nodes the replication / recovery is just dead slow
            TimeUnit.MINUTES);
    final ClusterHealthResponse response =
        client().admin().cluster().health(Requests.clusterHealthRequest("test")).get();
    if (response.getStatus() != ClusterHealthStatus.RED) {
      logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed);
      logger.info(
          "cluster state:\n{}\n{}",
          client().admin().cluster().prepareState().get().getState().prettyPrint(),
          client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
    }
    assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator shardIterators =
        state.getRoutingTable().activePrimaryShardsGrouped(new String[] {"test"}, false);
    for (ShardIterator iterator : shardIterators) {
      ShardRouting routing;
      while ((routing = iterator.nextOrNull()) != null) {
        if (routing.getId() == shardRouting.getId()) {
          assertThat(routing.state(), equalTo(ShardRoutingState.UNASSIGNED));
        } else {
          assertThat(
              routing.state(),
              anyOf(equalTo(ShardRoutingState.RELOCATING), equalTo(ShardRoutingState.STARTED)));
        }
      }
    }
    final List<Path> files = listShardFiles(shardRouting);
    Path corruptedFile = null;
    for (Path file : files) {
      if (file.getFileName().toString().startsWith("corrupted_")) {
        corruptedFile = file;
        break;
      }
    }
    assertThat(corruptedFile, notNullValue());
  }
Example #30
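Intercepts recovery file chunks on the transport layer and corrupts or truncates them, then verifies that the cluster still reaches GREEN (the primaries are unaffected) and that no shard ends up allocated on the unlucky node.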
  /**
   * Tests corruption that happens on the network layer and verifies that the primary does not get
   * affected by corruption that happens on the way to the replica. The file on disk stays
   * uncorrupted.
   */
  public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    if (cluster().numDataNodes() < 3) {
      internalCluster()
          .startNode(
              Settings.builder()
                  .put(Node.NODE_DATA_SETTING.getKey(), true)
                  .put(Node.NODE_MASTER_SETTING.getKey(), false));
    }
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
      if (stat.getNode().isDataNode()) {
        dataNodeStats.add(stat);
      }
    }

    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());
    NodeStats primariesNode = dataNodeStats.get(0);
    NodeStats unluckyNode = dataNodeStats.get(1);

    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
                    .put(
                        IndexMetaData.SETTING_NUMBER_OF_SHARDS,
                        between(1, 4)) // don't go crazy here, it must recover fast
                    // This does corrupt files on the replica, so we can't check:
                    .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                    .put(
                        "index.routing.allocation.include._name", primariesNode.getNode().getName())
                    .put(
                        EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
                        EnableAllocationDecider.Rebalance.NONE)));
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
      builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    assertAllSuccessful(
        client()
            .admin()
            .indices()
            .prepareFlush()
            .setForce(true)
            .setWaitIfOngoing(true)
            .execute()
            .actionGet());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    final boolean truncate = randomBoolean();
    for (NodeStats dataNode : dataNodeStats) {
      MockTransportService mockTransportService =
          ((MockTransportService)
              internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
      mockTransportService.addDelegate(
          internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
          new MockTransportService.DelegateTransport(mockTransportService.original()) {

            @Override
            public void sendRequest(
                DiscoveryNode node,
                long requestId,
                String action,
                TransportRequest request,
                TransportRequestOptions options)
                throws IOException, TransportException {
              if (action.equals(RecoveryTargetService.Actions.FILE_CHUNK)) {
                RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                if (truncate && req.length() > 1) {
                  BytesRef bytesRef = req.content().toBytesRef();
                  BytesArray array =
                      new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
                  request =
                      new RecoveryFileChunkRequest(
                          req.recoveryId(),
                          req.shardId(),
                          req.metadata(),
                          req.position(),
                          array,
                          req.lastChunk(),
                          req.totalTranslogOps(),
                          req.sourceThrottleTimeInNanos());
                } else {
                  assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes
                      : "no internal reference!!";
                  final byte[] array = req.content().toBytesRef().bytes;
                  int i = randomIntBetween(0, req.content().length() - 1);
                  array[i] = (byte) ~array[i]; // flip one byte in the content
                }
              }
              super.sendRequest(node, requestId, action, request, options);
            }
          });
    }

    Settings build =
        Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
            .put("index.routing.allocation.include._name", "*")
            .build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    ClusterHealthResponse actionGet =
        client()
            .admin()
            .cluster()
            .health(Requests.clusterHealthRequest("test").waitForGreenStatus())
            .actionGet();
    if (actionGet.isTimedOut()) {
      logger.info(
          "ensureGreen timed out, cluster state:\n{}\n{}",
          client().admin().cluster().prepareState().get().getState().prettyPrint(),
          client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
      assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
    }
    // we are green, so the primaries did not get corrupted.
    // ensure that no shard is actually allocated on the unlucky node
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
    for (IndexShardRoutingTable table :
        clusterStateResponse.getState().getRoutingTable().index("test")) {
      for (ShardRouting routing : table) {
        if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
          assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
          assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
        }
      }
    }
    final int numIterations = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIterations; i++) {
      SearchResponse response = client().prepareSearch().setSize(numDocs).get();
      assertHitCount(response, numDocs);
    }
  }