// Duel between histograms and scripted terms
public void testDuelTermsHistogram() throws Exception {
    createIndex("idx");

    final int numDocs = scaledRandomIntBetween(500, 5000);
    final int maxNumTerms = randomIntBetween(10, 2000);
    final int interval = randomIntBetween(1, 100);

    final Integer[] values = new Integer[maxNumTerms];
    for (int i = 0; i < values.length; ++i) {
        values[i] = randomInt(maxNumTerms * 3) - maxNumTerms;
    }

    for (int i = 0; i < numDocs; ++i) {
        XContentBuilder source = jsonBuilder()
                .startObject()
                .field("num", randomDouble())
                .startArray("values");
        final int numValues = randomInt(4);
        for (int j = 0; j < numValues; ++j) {
            source = source.value(randomFrom(values));
        }
        source = source.endArray().endObject();
        client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
    }
    assertNoFailures(client().admin().indices().prepareRefresh("idx")
            .setIndicesOptions(IndicesOptions.lenientExpandOpen())
            .execute().get());

    SearchResponse resp = client().prepareSearch("idx")
            .addAggregation(terms("terms")
                    .field("values")
                    .collectMode(randomFrom(SubAggCollectionMode.values()))
                    .script("floor(_value / interval)")
                    .param("interval", interval)
                    .size(maxNumTerms))
            .addAggregation(histogram("histo").field("values").interval(interval))
            .execute().actionGet();
    assertSearchResponse(resp);

    Terms terms = resp.getAggregations().get("terms");
    assertThat(terms, notNullValue());
    Histogram histo = resp.getAggregations().get("histo");
    assertThat(histo, notNullValue());
    assertThat(terms.getBuckets().size(), equalTo(histo.getBuckets().size()));
    for (Terms.Bucket bucket : terms.getBuckets()) {
        final long key = bucket.getKeyAsNumber().longValue() * interval;
        final Histogram.Bucket histoBucket = histo.getBucketByKey(key);
        assertEquals(bucket.getDocCount(), histoBucket.getDocCount());
    }
}
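// A standalone sketch (not part of the test above; numbers made up for
// illustration) of the invariant the duel relies on: the scripted terms key,
// floor(value / interval), scaled back up by interval, must be the key of the
// histogram bucket that the same value falls into.
public void sketchTermsHistogramKeyInvariant() {
    final long interval = 7;
    final double value = 23.0;
    final long termsKey = (long) Math.floor(value / interval); // 3, what the script emits
    final long histoKey = termsKey * interval;                 // 21, the matching histogram bucket key
    assertTrue(histoKey <= value && value < histoKey + interval);
}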
@Override
protected void masterOperation(final ClusterStateRequest request, final ClusterState state,
                               ActionListener<ClusterStateResponse> listener) throws ElasticsearchException {
    ClusterState currentState = clusterService.state();
    logger.trace("Serving cluster state request using version {}", currentState.version());
    ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName());
    builder.version(currentState.version());
    if (request.nodes()) {
        builder.nodes(currentState.nodes());
    }
    if (request.routingTable()) {
        if (request.indices().length > 0) {
            RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
            for (String filteredIndex : request.indices()) {
                if (currentState.routingTable().getIndicesRouting().containsKey(filteredIndex)) {
                    routingTableBuilder.add(currentState.routingTable().getIndicesRouting().get(filteredIndex));
                }
            }
            builder.routingTable(routingTableBuilder);
        } else {
            builder.routingTable(currentState.routingTable());
        }
    }
    if (request.blocks()) {
        builder.blocks(currentState.blocks());
    }
    if (request.metaData()) {
        MetaData.Builder mdBuilder;
        if (request.indices().length == 0) {
            mdBuilder = MetaData.builder(currentState.metaData());
        } else {
            mdBuilder = MetaData.builder();
        }
        if (request.indices().length > 0) {
            String[] indices = currentState.metaData()
                    .concreteIndices(IndicesOptions.lenientExpandOpen(), request.indices());
            for (String filteredIndex : indices) {
                IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex);
                if (indexMetaData != null) {
                    mdBuilder.put(indexMetaData, false);
                }
            }
        }
        builder.metaData(mdBuilder);
    }
    listener.onResponse(new ClusterStateResponse(clusterName, builder.build()));
}
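// A minimal caller-side sketch of the filtering implemented above: request only
// index metadata for selected indices, skipping nodes, blocks and the routing
// table. The chainable setters are the same ones exercised by the serialization
// test further down; with lenient options, names that resolve to nothing are
// silently dropped instead of failing the request. Index names are made up.
public ClusterStateRequest sketchMetaDataOnlyRequest() {
    return new ClusterStateRequest()
            .nodes(false)
            .blocks(false)
            .routingTable(false)
            .metaData(true)
            .indices("idx", "maybe-missing")
            .indicesOptions(IndicesOptions.lenientExpandOpen());
}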
@Test
public void testIndexNameFiltering() {
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"*"}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo"}, new String[]{"foo"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"ba*", "-bar", "-baz"}, new String[]{});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-bar"}, new String[]{"foo", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"-ba*"}, new String[]{"foo"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+ba*"}, new String[]{"bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"+bar", "+foo"}, new String[]{"bar", "foo"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"zzz", "bar"}, IndicesOptions.lenientExpandOpen(), new String[]{"bar"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{""}, IndicesOptions.lenientExpandOpen(), new String[]{});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "", "ba*"}, IndicesOptions.lenientExpandOpen(), new String[]{"foo", "bar", "baz"});
}
public ESDeletePartitionTask(UUID jobId,
                             TransportDeleteIndexAction transport,
                             ESDeletePartitionNode node) {
    super(jobId);
    this.transport = transport;
    this.request = new DeleteIndexRequest(node.indices());
    if (node.indices().length > 1) {
        /*
         * The table is partitioned. With concurrent "delete from partitions"
         * statements some partitions may already have been deleted, so
         * missing indices are ignored.
         */
        this.request.indicesOptions(IndicesOptions.lenientExpandOpen());
    } else {
        this.request.indicesOptions(IndicesOptions.strictExpandOpen());
    }
    this.listener = new DeleteIndexListener(result);
}
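// A short sketch of what the two presets chosen above amount to, expressed via
// fromOptions(ignoreUnavailable, allowNoIndices, expandToOpenIndices,
// expandToClosedIndices) as used elsewhere in this collection. The concrete flag
// values are an assumption stated for illustration, not a quote of the
// IndicesOptions source: the lenient variant tolerates missing indices where the
// strict one fails the request.
static void sketchLenientVersusStrict() {
    IndicesOptions lenient = IndicesOptions.fromOptions(true, true, true, false);  // ~ IndicesOptions.lenientExpandOpen()
    IndicesOptions strict = IndicesOptions.fromOptions(false, true, true, false);  // ~ IndicesOptions.strictExpandOpen()
}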
@Test
public void testSerialization() throws Exception {
    int iterations = randomIntBetween(5, 20);
    for (int i = 0; i < iterations; i++) {
        IndicesOptions indicesOptions = IndicesOptions.fromOptions(
                randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
        ClusterStateRequest clusterStateRequest = new ClusterStateRequest()
                .routingTable(randomBoolean())
                .metaData(randomBoolean())
                .nodes(randomBoolean())
                .blocks(randomBoolean())
                .indices("testindex", "testindex2")
                .indicesOptions(indicesOptions);

        Version testVersion = VersionUtils.randomVersionBetween(
                random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT);
        BytesStreamOutput output = new BytesStreamOutput();
        output.setVersion(testVersion);
        clusterStateRequest.writeTo(output);

        StreamInput streamInput = StreamInput.wrap(output.bytes());
        streamInput.setVersion(testVersion);
        ClusterStateRequest deserializedCSRequest = new ClusterStateRequest();
        deserializedCSRequest.readFrom(streamInput);

        assertThat(deserializedCSRequest.routingTable(), equalTo(clusterStateRequest.routingTable()));
        assertThat(deserializedCSRequest.metaData(), equalTo(clusterStateRequest.metaData()));
        assertThat(deserializedCSRequest.nodes(), equalTo(clusterStateRequest.nodes()));
        assertThat(deserializedCSRequest.blocks(), equalTo(clusterStateRequest.blocks()));
        assertThat(deserializedCSRequest.indices(), equalTo(clusterStateRequest.indices()));

        if (testVersion.onOrAfter(Version.V_1_5_0)) {
            assertOptionsMatch(deserializedCSRequest.indicesOptions(), clusterStateRequest.indicesOptions());
        } else {
            // versions before V_1_5_0 use IndicesOptions.lenientExpandOpen()
            assertOptionsMatch(deserializedCSRequest.indicesOptions(), IndicesOptions.lenientExpandOpen());
        }
    }
}
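// assertOptionsMatch is a local helper that is not shown in this collection. A
// plausible implementation, sketched here as an assumption about what it
// verifies (the same four flags that fromOptions sets):
private static void assertOptionsMatch(IndicesOptions actual, IndicesOptions expected) {
    assertThat(actual.ignoreUnavailable(), equalTo(expected.ignoreUnavailable()));
    assertThat(actual.allowNoIndices(), equalTo(expected.allowNoIndices()));
    assertThat(actual.expandWildcardsOpen(), equalTo(expected.expandWildcardsOpen()));
    assertThat(actual.expandWildcardsClosed(), equalTo(expected.expandWildcardsClosed()));
}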
private DocIndexMetaData docIndexMetaData() {
    DocIndexMetaData docIndexMetaData;
    String templateName = PartitionName.templateName(ident.schema(), ident.name());
    boolean createdFromTemplate = false;
    if (metaData.getTemplates().containsKey(templateName)) {
        docIndexMetaData = buildDocIndexMetaDataFromTemplate(ident.indexName(), templateName);
        createdFromTemplate = true;
        concreteIndices = metaData.concreteIndices(IndicesOptions.lenientExpandOpen(), ident.indexName());
    } else {
        try {
            concreteIndices = metaData.concreteIndices(IndicesOptions.strictExpandOpen(), ident.indexName());
            if (concreteIndices.length == 0) {
                // no matching index found
                throw new TableUnknownException(ident);
            }
            docIndexMetaData = buildDocIndexMetaData(concreteIndices[0]);
        } catch (IndexMissingException ex) {
            throw new TableUnknownException(ident.fqn(), ex);
        }
    }

    if ((!createdFromTemplate && concreteIndices.length == 1) || !checkAliasSchema) {
        return docIndexMetaData;
    }
    for (int i = 0; i < concreteIndices.length; i++) {
        try {
            docIndexMetaData = docIndexMetaData.merge(
                    buildDocIndexMetaData(concreteIndices[i]),
                    transportPutIndexTemplateAction,
                    createdFromTemplate);
        } catch (IOException e) {
            throw new UnhandledServerException("Unable to merge/build new DocIndexMetaData", e);
        }
    }
    return docIndexMetaData;
}
public void testDeleteByQueryWithMissingIndex() throws Exception {
    client().prepareIndex("test", "test")
            .setSource(jsonBuilder().startObject().field("field1", 1).endObject())
            .setRefresh(true)
            .get();
    assertHitCount(client().prepareSearch().setSize(0).get(), 1);

    DeleteByQueryRequestBuilder delete = newDeleteByQuery()
            .setIndices("test", "missing")
            .setQuery(QueryBuilders.matchAllQuery());
    try {
        delete.get();
        fail("should have thrown an exception because of a missing index");
    } catch (IndexNotFoundException e) {
        // expected: the default (strict) indices options reject missing indices
    }

    delete.setIndicesOptions(IndicesOptions.lenientExpandOpen());
    assertDBQResponse(delete.get(), 1L, 1L, 0L, 0L);
    refresh();
    assertHitCount(client().prepareSearch("test").setSize(0).get(), 0);
    assertSearchContextsClosed();
}
private void assertIndexNameFiltering(String[] indices, String[] filter, String[] expected) {
    assertIndexNameFiltering(indices, filter, IndicesOptions.lenientExpandOpen(), expected);
}
// Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported
// Duel with filters
public void testRandomRanges() throws Exception {
    final int numDocs = scaledRandomIntBetween(500, 5000);
    final double[][] docs = new double[numDocs][];
    for (int i = 0; i < numDocs; ++i) {
        final int numValues = randomInt(5);
        docs[i] = new double[numValues];
        for (int j = 0; j < numValues; ++j) {
            docs[i][j] = randomDouble() * 100;
        }
    }

    createIndex("idx");
    for (int i = 0; i < docs.length; ++i) {
        XContentBuilder source = jsonBuilder().startObject().startArray("values");
        for (int j = 0; j < docs[i].length; ++j) {
            source = source.value(docs[i][j]);
        }
        source = source.endArray().endObject();
        client().prepareIndex("idx", "type").setSource(source).execute().actionGet();
    }
    assertNoFailures(client().admin().indices().prepareRefresh("idx")
            .setIndicesOptions(IndicesOptions.lenientExpandOpen())
            .execute().get());

    final int numRanges = randomIntBetween(1, 20);
    final double[][] ranges = new double[numRanges][];
    for (int i = 0; i < ranges.length; ++i) {
        switch (randomInt(2)) {
            case 0:
                ranges[i] = new double[]{Double.NEGATIVE_INFINITY, randomInt(100)};
                break;
            case 1:
                ranges[i] = new double[]{randomInt(100), Double.POSITIVE_INFINITY};
                break;
            case 2:
                ranges[i] = new double[]{randomInt(100), randomInt(100)};
                break;
            default:
                throw new AssertionError();
        }
    }

    RangeBuilder query = range("range").field("values");
    for (int i = 0; i < ranges.length; ++i) {
        String key = Integer.toString(i);
        if (ranges[i][0] == Double.NEGATIVE_INFINITY) {
            query.addUnboundedTo(key, ranges[i][1]);
        } else if (ranges[i][1] == Double.POSITIVE_INFINITY) {
            query.addUnboundedFrom(key, ranges[i][0]);
        } else {
            query.addRange(key, ranges[i][0], ranges[i][1]);
        }
    }

    SearchRequestBuilder reqBuilder = client().prepareSearch("idx").addAggregation(query);
    for (int i = 0; i < ranges.length; ++i) {
        RangeFilterBuilder filter = FilterBuilders.rangeFilter("values");
        if (ranges[i][0] != Double.NEGATIVE_INFINITY) {
            filter = filter.from(ranges[i][0]);
        }
        if (ranges[i][1] != Double.POSITIVE_INFINITY) {
            filter = filter.to(ranges[i][1]);
        }
        reqBuilder = reqBuilder.addAggregation(filter("filter" + i).filter(filter));
    }
    SearchResponse resp = reqBuilder.execute().actionGet();
    Range range = resp.getAggregations().get("range");

    for (int i = 0; i < ranges.length; ++i) {
        long count = 0;
        for (double[] values : docs) {
            for (double value : values) {
                if (value >= ranges[i][0] && value < ranges[i][1]) {
                    ++count;
                    break;
                }
            }
        }
        final Range.Bucket bucket = range.getBucketByKey(Integer.toString(i));
        assertEquals(bucket.getKey(), count, bucket.getDocCount());
        final Filter filter = resp.getAggregations().get("filter" + i);
        assertThat(filter.getDocCount(), equalTo(count));
    }
}
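// Standalone illustration (numbers made up) of the half-open interval semantics
// the brute-force count above mirrors: a range bucket [from, to) includes its
// lower bound and excludes its upper bound, which is why the membership check
// reads value >= from && value < to.
public void sketchRangeBucketBounds() {
    final double from = 10, to = 20;
    assertTrue(10.0 >= from && 10.0 < to);    // 10 falls into [10, 20)
    assertFalse(20.0 >= from && 20.0 < to);   // 20 belongs to the next bucket
}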
// test long/double/string terms aggs with high number of buckets that require array growth
public void testDuelTerms() throws Exception {
    final int numDocs = scaledRandomIntBetween(1000, 2000);
    final int maxNumTerms = randomIntBetween(10, 5000);

    final IntOpenHashSet valuesSet = new IntOpenHashSet();
    cluster().wipeIndices("idx");
    prepareCreate("idx")
            .addMapping("type", jsonBuilder().startObject()
                    .startObject("type")
                        .startObject("properties")
                            .startObject("string_values")
                                .field("type", "string")
                                .field("index", "not_analyzed")
                                .startObject("fields")
                                    .startObject("doc_values")
                                        .field("type", "string")
                                        .field("index", "no")
                                        .startObject("fielddata")
                                            .field("format", "doc_values")
                                        .endObject()
                                    .endObject()
                                .endObject()
                            .endObject()
                            .startObject("long_values")
                                .field("type", "long")
                            .endObject()
                            .startObject("double_values")
                                .field("type", "double")
                            .endObject()
                        .endObject()
                    .endObject())
            .execute().actionGet();

    List<IndexRequestBuilder> indexingRequests = Lists.newArrayList();
    for (int i = 0; i < numDocs; ++i) {
        final int[] values = new int[randomInt(4)];
        for (int j = 0; j < values.length; ++j) {
            values[j] = randomInt(maxNumTerms - 1) - 1000;
            valuesSet.add(values[j]);
        }
        XContentBuilder source = jsonBuilder()
                .startObject()
                .field("num", randomDouble())
                .startArray("long_values");
        for (int j = 0; j < values.length; ++j) {
            source = source.value(values[j]);
        }
        source = source.endArray().startArray("double_values");
        for (int j = 0; j < values.length; ++j) {
            source = source.value((double) values[j]);
        }
        source = source.endArray().startArray("string_values");
        for (int j = 0; j < values.length; ++j) {
            source = source.value(Integer.toString(values[j]));
        }
        source = source.endArray().endObject();
        indexingRequests.add(client().prepareIndex("idx", "type").setSource(source));
    }
    indexRandom(true, indexingRequests);

    assertNoFailures(client().admin().indices().prepareRefresh("idx")
            .setIndicesOptions(IndicesOptions.lenientExpandOpen())
            .execute().get());

    TermsAggregatorFactory.ExecutionMode[] globalOrdinalModes = new TermsAggregatorFactory.ExecutionMode[] {
            TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS_HASH,
            TermsAggregatorFactory.ExecutionMode.GLOBAL_ORDINALS
    };

    SearchResponse resp = client().prepareSearch("idx")
            .addAggregation(terms("long")
                    .field("long_values")
                    .size(maxNumTerms)
                    .collectMode(randomFrom(SubAggCollectionMode.values()))
                    .subAggregation(min("min").field("num")))
            .addAggregation(terms("double")
                    .field("double_values")
                    .size(maxNumTerms)
                    .collectMode(randomFrom(SubAggCollectionMode.values()))
                    .subAggregation(max("max").field("num")))
            .addAggregation(terms("string_map")
                    .field("string_values")
                    .collectMode(randomFrom(SubAggCollectionMode.values()))
                    .executionHint(TermsAggregatorFactory.ExecutionMode.MAP.toString())
                    .size(maxNumTerms)
                    .subAggregation(stats("stats").field("num")))
            .addAggregation(terms("string_global_ordinals")
                    .field("string_values")
                    .collectMode(randomFrom(SubAggCollectionMode.values()))
                    .executionHint(globalOrdinalModes[randomInt(globalOrdinalModes.length - 1)].toString())
                    .size(maxNumTerms)
                    .subAggregation(extendedStats("stats").field("num")))
            .addAggregation(terms("string_global_ordinals_doc_values")
                    .field("string_values.doc_values")
                    .collectMode(randomFrom(SubAggCollectionMode.values()))
                    .executionHint(globalOrdinalModes[randomInt(globalOrdinalModes.length - 1)].toString())
                    .size(maxNumTerms)
                    .subAggregation(extendedStats("stats").field("num")))
            .execute().actionGet();
    assertAllSuccessful(resp);
    assertEquals(numDocs, resp.getHits().getTotalHits());

    final Terms longTerms = resp.getAggregations().get("long");
    final Terms doubleTerms = resp.getAggregations().get("double");
    final Terms stringMapTerms = resp.getAggregations().get("string_map");
    final Terms stringGlobalOrdinalsTerms = resp.getAggregations().get("string_global_ordinals");
    final Terms stringGlobalOrdinalsDVTerms = resp.getAggregations().get("string_global_ordinals_doc_values");

    assertEquals(valuesSet.size(), longTerms.getBuckets().size());
    assertEquals(valuesSet.size(), doubleTerms.getBuckets().size());
    assertEquals(valuesSet.size(), stringMapTerms.getBuckets().size());
    assertEquals(valuesSet.size(), stringGlobalOrdinalsTerms.getBuckets().size());
    assertEquals(valuesSet.size(), stringGlobalOrdinalsDVTerms.getBuckets().size());

    for (Terms.Bucket bucket : longTerms.getBuckets()) {
        final Terms.Bucket doubleBucket = doubleTerms.getBucketByKey(
                Double.toString(Long.parseLong(bucket.getKeyAsText().string())));
        final Terms.Bucket stringMapBucket = stringMapTerms.getBucketByKey(bucket.getKeyAsText().string());
        final Terms.Bucket stringGlobalOrdinalsBucket = stringGlobalOrdinalsTerms.getBucketByKey(bucket.getKeyAsText().string());
        final Terms.Bucket stringGlobalOrdinalsDVBucket = stringGlobalOrdinalsDVTerms.getBucketByKey(bucket.getKeyAsText().string());
        assertNotNull(doubleBucket);
        assertNotNull(stringMapBucket);
        assertNotNull(stringGlobalOrdinalsBucket);
        assertNotNull(stringGlobalOrdinalsDVBucket);
        assertEquals(bucket.getDocCount(), doubleBucket.getDocCount());
        assertEquals(bucket.getDocCount(), stringMapBucket.getDocCount());
        assertEquals(bucket.getDocCount(), stringGlobalOrdinalsBucket.getDocCount());
        assertEquals(bucket.getDocCount(), stringGlobalOrdinalsDVBucket.getDocCount());
    }
}
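// Small sketch (key value made up) of the key normalization used above when
// pairing long-terms buckets with double-terms buckets: the long key "-3" is
// looked up as the double key "-3.0".
public void sketchLongToDoubleKeyNormalization() {
    String longKey = "-3";
    String doubleKey = Double.toString(Long.parseLong(longKey)); // "-3.0"
    assertEquals("-3.0", doubleKey);
}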
public void testRecoverFromPreviousVersion() throws ExecutionException, InterruptedException {
    if (backwardsCluster().numNewDataNodes() == 0) {
        backwardsCluster().startNewNode();
    }
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
            .put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern())
            .put(indexSettings())));
    ensureYellow();
    assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());

    int numDocs = randomIntBetween(100, 150);
    ArrayList<String> ids = new ArrayList<>();
    logger.info(" --> indexing [{}] docs", numDocs);
    IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < numDocs; i++) {
        String id = randomRealisticUnicodeOfLength(10) + String.valueOf(i);
        ids.add(id);
        docs[i] = client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i));
    }
    indexRandom(true, docs);
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);

    if (randomBoolean()) {
        logger.info(" --> moving index to new nodes");
        backwardsCluster().allowOnlyNewNodes("test");
    } else {
        logger.info(" --> allowing index on all nodes");
        backwardsCluster().allowOnAllNodes("test");
    }

    logger.info(" --> indexing [{}] more docs", numDocs);
    // sometimes index while relocating
    if (randomBoolean()) {
        for (int i = 0; i < numDocs; i++) {
            String id = randomRealisticUnicodeOfLength(10) + String.valueOf(numDocs + i);
            ids.add(id);
            docs[i] = client().prepareIndex("test", "type1", id)
                    .setSource("field1", English.intToEnglish(numDocs + i));
        }
        indexRandom(true, docs);
        if (compatibilityVersion().before(Version.V_1_3_0)) {
            // issue another refresh through a new node to side step issue #6545
            assertNoFailures(backwardsCluster().internalCluster().dataNodeClient().admin().indices()
                    .prepareRefresh()
                    .setIndicesOptions(IndicesOptions.lenientExpandOpen())
                    .execute().get());
        }
        numDocs *= 2;
    }

    logger.info(" --> waiting for relocation to complete");
    ensureYellow("test"); // move all shards to the new node (it waits on relocation)
    final int numIters = randomIntBetween(10, 20);
    for (int i = 0; i < numIters; i++) {
        assertSearchHits(client().prepareSearch().setSize(ids.size()).get(), ids.toArray(new String[ids.size()]));
    }
    assertVersionCreated(compatibilityVersion(), "test");
}