public void addDeletion(SearchRequestBuilder searchRequest) {
  searchRequest
      .addSort("_doc", SortOrder.ASC)
      .setScroll(TimeValue.timeValueMinutes(5))
      .setSize(100)
      // load only doc ids, not _source fields
      .setFetchSource(false);

  // This search is synchronous. An optimization would be to make it non-blocking,
  // but that requires tracking pending requests in close(). The same semaphore
  // can't be reused because of a potential deadlock (it would require acquiring
  // two locks).
  SearchResponse searchResponse = searchRequest.get();
  while (true) {
    SearchHit[] hits = searchResponse.getHits().getHits();
    String scrollId = searchResponse.getScrollId();
    // an empty page means the scroll is exhausted: release it and stop
    if (hits.length == 0) {
      client.nativeClient().prepareClearScroll().addScrollId(scrollId).get();
      break;
    }
    for (SearchHit hit : hits) {
      DeleteRequestBuilder deleteRequestBuilder =
          client.prepareDelete(hit.index(), hit.type(), hit.getId());
      // preserve custom routing so the delete reaches the same shard as the document
      SearchHitField routing = hit.field("_routing");
      if (routing != null) {
        deleteRequestBuilder.setRouting(routing.getValue());
      }
      add(deleteRequestBuilder.request());
    }
    searchResponse =
        client.prepareSearchScroll(scrollId).setScroll(TimeValue.timeValueMinutes(5)).get();
  }
}
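// --- Usage sketch (illustrative, not from the original source) ---
// Assumptions: "BulkIndexer" stands in for whichever class owns addDeletion(),
// and the index/type/query names below are made up. The caller only shapes the
// query; addDeletion() handles the _doc sort, scrolling, and the per-hit delete
// requests itself.
public void deleteObsoleteDocs(Client client, BulkIndexer indexer) {
  SearchRequestBuilder toDelete =
      client
          .prepareSearch("my_index") // hypothetical index name
          .setTypes("my_type") // hypothetical type name
          .setQuery(QueryBuilders.termQuery("status", "obsolete")); // hypothetical query
  indexer.addDeletion(toDelete);
}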
private void test(
    String name, Class<? extends HashFunction> expectedHashFunction, boolean expectedUseType)
    throws Exception {
  Path zippedIndexDir = getDataPath("/org/elasticsearch/cluster/routing/" + name + ".zip");
  Settings baseSettings = prepareBackwardsDataDir(zippedIndexDir);
  internalCluster()
      .startNode(Settings.builder().put(baseSettings).put(Node.HTTP_ENABLED, true).build());
  ensureYellow("test");
  GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().get();
  assertArrayEquals(new String[] {"test"}, getIndexResponse.indices());
  GetSettingsResponse getSettingsResponse =
      client().admin().indices().prepareGetSettings("test").get();
  assertEquals(
      expectedHashFunction.getName(),
      getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION));
  assertEquals(
      Boolean.valueOf(expectedUseType).toString(),
      getSettingsResponse.getSetting("test", IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE));
  SearchResponse allDocs = client().prepareSearch("test").get();
  assertSearchResponse(allDocs);
  assertHitCount(allDocs, 4);
  // Make sure routing works
  for (SearchHit hit : allDocs.getHits().hits()) {
    GetResponse get = client().prepareGet(hit.index(), hit.type(), hit.id()).get();
    assertTrue(get.isExists());
  }
}
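// --- Invocation sketch (illustrative, not from the original source) ---
// Assumption: each archived index gets its own test case that delegates to
// test(name, expectedHashFunction, expectedUseType). The fixture name and the
// DjbHashFunction choice below are examples only; the zip must exist under
// /org/elasticsearch/cluster/routing/ in the test resources.
public void testDjbRoutedIndex() throws Exception {
  test("index_with_djb_routing", DjbHashFunction.class, true); // hypothetical fixture
}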
@Override
public void onResponse(final SearchResponse response) {
  if (!isConnected()) {
    onFailure(new DfContentException("Disconnected."));
    return;
  }
  final String scrollId = response.getScrollId();
  // the first response of a scan search carries no hits; fetch the first real page
  if (isFirstScan()) {
    client
        .prepareSearchScroll(scrollId)
        .setScroll(RequestUtil.getScroll(request))
        .execute(this);
    return;
  }
  final SearchHits hits = response.getHits();
  final int size = hits.getHits().length;
  currentCount += size;
  logger.info(
      "scrollId: "
          + scrollId
          + ", totalHits: "
          + hits.totalHits()
          + ", hits: "
          + size
          + ", current: "
          + currentCount);
  try {
    // write each hit as a bulk "index" action line followed by its source line
    for (final SearchHit hit : hits) {
      final String index = bulkIndex == null ? hit.index() : bulkIndex;
      final String type = bulkType == null ? hit.type() : bulkType;
      final String operation =
          "{\"index\":{\"_index\":\""
              + index
              + "\",\"_type\":\""
              + type
              + "\",\"_id\":\""
              + hit.id()
              + "\"}}";
      final String source = hit.sourceAsString();
      writer.append(operation).append('\n');
      writer.append(source).append('\n');
    }
    if (size == 0 || scrollId == null) {
      // no more pages: flush, close and signal completion
      writer.flush();
      close();
      listener.onResponse(null);
    } else {
      // continue scrolling; this object is also the listener for the next page
      client
          .prepareSearchScroll(scrollId)
          .setScroll(RequestUtil.getScroll(request))
          .execute(this);
    }
  } catch (final Exception e) {
    onFailure(e);
  }
}
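// --- Wiring sketch (illustrative, not from the original source) ---
// Assumption: this listener is kicked off by a scan-type search whose first
// response carries no hits (hence the isFirstScan() branch); the index name and
// page size below are made up. Each response re-enters onResponse() via
// execute(this) until an empty page or a null scrollId ends the dump.
public void start() {
  client
      .prepareSearch("my_index") // hypothetical index name
      .setSearchType(SearchType.SCAN) // scan + scroll, matching the first-scan branch
      .setScroll(RequestUtil.getScroll(request))
      .setSize(100) // hypothetical page size
      .execute(this);
}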
public void run() throws Exception {
  Node[] nodes = new Node[numberOfNodes];
  for (int i = 0; i < nodes.length; i++) {
    nodes[i] = NodeBuilder.nodeBuilder().settings(settings).node();
  }
  client = NodeBuilder.nodeBuilder().settings(settings).client(true).node();

  client
      .client()
      .admin()
      .indices()
      .prepareCreate("test")
      .setSettings(
          settingsBuilder()
              .put("index.number_of_shards", numberOfShards)
              .put("index.number_of_replicas", numberOfReplicas))
      .execute()
      .actionGet();

  logger.info("********** [START] INDEXING INITIAL DOCS");
  for (long i = 0; i < initialNumberOfDocs; i++) {
    indexDoc();
  }
  logger.info("********** [DONE ] INDEXING INITIAL DOCS");

  Indexer[] indexerThreads = new Indexer[indexers];
  for (int i = 0; i < indexerThreads.length; i++) {
    indexerThreads[i] = new Indexer();
  }
  for (int i = 0; i < indexerThreads.length; i++) {
    indexerThreads[i].start();
  }

  long testStart = System.currentTimeMillis();

  // start doing the rolling restart
  int nodeIndex = 0;
  while (true) {
    File[] nodeData =
        ((InternalNode) nodes[nodeIndex])
            .injector()
            .getInstance(NodeEnvironment.class)
            .nodeDataLocations();
    nodes[nodeIndex].close();
    if (clearNodeData) {
      FileSystemUtils.deleteRecursively(nodeData);
    }

    try {
      ClusterHealthResponse clusterHealth =
          client
              .client()
              .admin()
              .cluster()
              .prepareHealth()
              .setWaitForGreenStatus()
              // one data node is down: (numberOfNodes - 1) data nodes + 1 client node
              .setWaitForNodes(Integer.toString(numberOfNodes))
              .setWaitForRelocatingShards(0)
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealth.timedOut()) {
        logger.warn("timed out waiting for green status....");
      }
    } catch (Exception e) {
      logger.warn("failed to execute cluster health....");
    }

    nodes[nodeIndex] = NodeBuilder.nodeBuilder().settings(settings).node();

    Thread.sleep(1000);

    try {
      ClusterHealthResponse clusterHealth =
          client
              .client()
              .admin()
              .cluster()
              .prepareHealth()
              .setWaitForGreenStatus()
              // node is back: all data nodes + 1 client node
              .setWaitForNodes(Integer.toString(numberOfNodes + 1))
              .setWaitForRelocatingShards(0)
              .setTimeout("10m")
              .execute()
              .actionGet();
      if (clusterHealth.timedOut()) {
        logger.warn("timed out waiting for green status....");
      }
    } catch (Exception e) {
      logger.warn("failed to execute cluster health....");
    }

    if (++nodeIndex == nodes.length) {
      nodeIndex = 0;
    }

    if ((System.currentTimeMillis() - testStart) > period.millis()) {
      logger.info("test finished");
      break;
    }
  }

  for (int i = 0; i < indexerThreads.length; i++) {
    indexerThreads[i].close = true;
  }

  Thread.sleep(indexerThrottle.millis() + 10000);

  for (int i = 0; i < indexerThreads.length; i++) {
    if (!indexerThreads[i].closed) {
      logger.warn("thread not closed!");
    }
  }

  client.client().admin().indices().prepareRefresh().execute().actionGet();

  // check the status: every copy of a shard should report the same doc count
  IndicesStatusResponse status =
      client.client().admin().indices().prepareStatus("test").execute().actionGet();
  for (IndexShardStatus shardStatus : status.index("test")) {
    ShardStatus first = shardStatus.shards()[0];
    logger.info("shard [{}], docs [{}]", first.shardId(), first.getDocs().numDocs());
    for (ShardStatus replica : shardStatus) {
      if (first.docs().numDocs() != replica.docs().numDocs()) {
        logger.warn(
            "shard doc number does not match!, got {} and {}",
            first.docs().numDocs(),
            replica.docs().numDocs());
      }
    }
  }

  // check the count against the number of docs we actually indexed
  for (int i = 0; i < (nodes.length * 5); i++) {
    CountResponse count =
        client.client().prepareCount().setQuery(matchAllQuery()).execute().actionGet();
    logger.info(
        "indexed [{}], count [{}], [{}]",
        indexCounter.get(),
        count.count(),
        count.count() == indexCounter.get() ? "OK" : "FAIL");
    if (count.count() != indexCounter.get()) {
      logger.warn("count does not match!");
    }
  }

  // scan all the docs, verify every copy has the same version across the replicas
  SearchResponse searchResponse =
      client
          .client()
          .prepareSearch()
          .setSearchType(SearchType.SCAN)
          .setQuery(matchAllQuery())
          .setSize(50)
          .setScroll(TimeValue.timeValueMinutes(2))
          .execute()
          .actionGet();
  logger.info("Verifying versions for {} hits...", searchResponse.hits().totalHits());

  while (true) {
    searchResponse =
        client
            .client()
            .prepareSearchScroll(searchResponse.scrollId())
            .setScroll(TimeValue.timeValueMinutes(2))
            .execute()
            .actionGet();
    if (searchResponse.failedShards() > 0) {
      logger.warn("Search Failures " + Arrays.toString(searchResponse.shardFailures()));
    }
    for (SearchHit hit : searchResponse.hits()) {
      long version = -1;
      // GET each doc (numberOfReplicas + 1) times so every shard copy is sampled
      for (int i = 0; i < (numberOfReplicas + 1); i++) {
        GetResponse getResponse =
            client.client().prepareGet(hit.index(), hit.type(), hit.id()).execute().actionGet();
        if (version == -1) {
          version = getResponse.version();
        } else if (version != getResponse.version()) {
          logger.warn(
              "Doc {} has different version numbers {} and {}",
              hit.id(),
              version,
              getResponse.version());
        }
      }
    }
    if (searchResponse.hits().hits().length == 0) {
      break;
    }
  }
  logger.info("Done verifying versions");

  client.close();
  for (Node node : nodes) {
    node.close();
  }
}