@Override
public ClusterState execute(ClusterState currentState) throws Exception {
  if (cancellableThreads.isCancelled() == false) { // no need to run this if recovery is canceled
    IndexMetaData indexMetaData =
        clusterService.state().metaData().getIndices().get(indexService.index().getName());
    ImmutableOpenMap<String, MappingMetaData> metaDataMappings = null;
    if (indexMetaData != null) {
      metaDataMappings = indexMetaData.getMappings();
    }
    // The default mapping should not be sent back: it can only be updated by the put mapping
    // API, and that is a full in-place replace, so we don't want to override a potential update
    // coming into it.
    for (DocumentMapper documentMapper : indexService.mapperService().docMappers(false)) {
      MappingMetaData mappingMetaData =
          metaDataMappings == null ? null : metaDataMappings.get(documentMapper.type());
      if (mappingMetaData == null
          || !documentMapper.refreshSource().equals(mappingMetaData.source())) {
        // not on the master yet in the right form
        documentMappersToUpdate.add(documentMapper);
      }
    }
  }
  return currentState;
}
static SearchContext createSearchContext(String indexName, String parentType, String childType)
    throws IOException {
  Settings settings =
      Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_6_0).build();
  IndexService indexService = createIndex(indexName, settings);
  MapperService mapperService = indexService.mapperService();
  // Parent/child parsers require that the parent and child types be present in the mapping.
  // Sometimes we want a nested object field in the parent type that triggers
  // nonNestedDocsFilter to be used.
  mapperService.merge(
      parentType,
      new CompressedXContent(
          PutMappingRequest.buildFromSimplifiedDef(
                  parentType,
                  "nested_field",
                  random().nextBoolean() ? "type=nested" : "type=object")
              .string()),
      true,
      false);
  mapperService.merge(
      childType,
      new CompressedXContent(
          PutMappingRequest.buildFromSimplifiedDef(
                  childType,
                  "_parent",
                  "type=" + parentType,
                  CHILD_SCORE_NAME,
                  "type=double,doc_values=false")
              .string()),
      true,
      false);
  return createSearchContext(indexService);
}
public void testField() throws Exception {
  IndexService indexService = createIndex("test");
  DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
  String mapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .endObject()
          .endObject()
          .string();
  DocumentMapper mapper = parser.parse(mapping);
  assertEquals(mapping, serialize(mapper));
  Mapper update =
      parse(
          mapper,
          parser,
          XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject());
  assertNotNull(update);
  // original mapping not modified
  assertEquals(mapping, serialize(mapper));
  // but we have an update
  assertEquals("{\"type\":{\"properties\":{\"foo\":{\"type\":\"string\"}}}}", serialize(update));
}
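// NOTE: the dynamic-mapping tests in this section call serialize(...) and parse(...) helpers
// that are not shown here. Below is only a minimal sketch of what they are assumed to do; the
// names and signatures are inferred from the call sites (the real helpers likely build an
// explicit ParseContext and make use of the parser argument, which this sketch ignores).
private static String serialize(ToXContent mapper) throws Exception {
  // Render the mapper back to JSON so it can be compared against the expected mapping string.
  XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
  mapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
  return builder.endObject().string();
}

private static Mapper parse(
    DocumentMapper mapper, DocumentMapperParser parser, XContentBuilder document)
    throws Exception {
  // Parse a sample document and return the root of the dynamic mapping update it produced,
  // or null when no new fields were introduced.
  ParsedDocument doc =
      mapper.parse(SourceToParse.source("test", mapper.type(), "1", document.bytes()));
  return doc.dynamicMappingsUpdate() == null ? null : doc.dynamicMappingsUpdate().root();
}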
public void testTimestampParsing() throws IOException {
  IndexService indexService = createIndex("test");
  XContentBuilder indexMapping = XContentFactory.jsonBuilder();
  boolean enabled = randomBoolean();
  indexMapping
      .startObject()
      .startObject("type")
      .startObject("_timestamp")
      .field("enabled", enabled)
      .endObject()
      .endObject()
      .endObject();
  DocumentMapper documentMapper =
      indexService
          .mapperService()
          .parse("type", new CompressedXContent(indexMapping.string()), true);
  assertThat(documentMapper.timestampFieldMapper().enabled(), equalTo(enabled));
  assertTrue(documentMapper.timestampFieldMapper().fieldType().stored());
  assertTrue(documentMapper.timestampFieldMapper().fieldType().hasDocValues());
  documentMapper =
      indexService
          .mapperService()
          .parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
  assertThat(documentMapper.timestampFieldMapper().enabled(), equalTo(enabled));
  assertTrue(documentMapper.timestampFieldMapper().fieldType().hasDocValues());
  assertTrue(documentMapper.timestampFieldMapper().fieldType().stored());
}
@Override
protected GetFieldMappingsResponse shardOperation(
    final GetFieldMappingsIndexRequest request, ShardId shardId) {
  assert shardId != null;
  IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
  Collection<String> typeIntersection;
  if (request.types().length == 0) {
    typeIntersection = indexService.mapperService().types();
  } else {
    typeIntersection =
        indexService
            .mapperService()
            .types()
            .stream()
            .filter(type -> Regex.simpleMatch(request.types(), type))
            .collect(Collectors.toCollection(ArrayList::new));
    if (typeIntersection.isEmpty()) {
      throw new TypeMissingException(shardId.getIndex(), request.types());
    }
  }
  MapBuilder<String, Map<String, FieldMappingMetaData>> typeMappings = new MapBuilder<>();
  for (String type : typeIntersection) {
    DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
    Map<String, FieldMappingMetaData> fieldMapping =
        findFieldMappingsByType(documentMapper, request);
    if (!fieldMapping.isEmpty()) {
      typeMappings.put(type, fieldMapping);
    }
  }
  return new GetFieldMappingsResponse(
      singletonMap(shardId.getIndexName(), typeMappings.immutableMap()));
}
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(
    ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
  final IndexRequest request = shardRequest.request;
  // validate, if routing is required, that we got routing
  IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
  MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
  if (mappingMd != null && mappingMd.routing().required()) {
    if (request.routing() == null) {
      throw new RoutingMissingException(
          shardRequest.shardId.getIndex(), request.type(), request.id());
    }
  }
  IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
  IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
  final WriteResult<IndexResponse> result =
      executeIndexRequestOnPrimary(null, request, indexShard);
  final IndexResponse response = result.response;
  final Translog.Location location = result.location;
  processAfter(request.refresh(), indexShard, location);
  return new Tuple<>(response, shardRequest.request);
}
/**
 * Returns the shards to purge, i.e. the local started primary shards that have TTL enabled for
 * at least one type and do not have disable_purge set.
 */
private List<IndexShard> getShardsToPurge() {
  List<IndexShard> shardsToPurge = new ArrayList<>();
  MetaData metaData = clusterService.state().metaData();
  for (IndexService indexService : indicesService) {
    // check the value of disable_purge for this index
    IndexMetaData indexMetaData = metaData.index(indexService.index());
    if (indexMetaData == null) {
      continue;
    }
    if (indexService.getIndexSettings().isTTLPurgeDisabled()) {
      continue;
    }
    // check if ttl is enabled for at least one type of this index
    boolean hasTTLEnabled = false;
    for (String type : indexService.mapperService().types()) {
      DocumentMapper documentType = indexService.mapperService().documentMapper(type);
      if (documentType.TTLFieldMapper().enabled()) {
        hasTTLEnabled = true;
        break;
      }
    }
    if (hasTTLEnabled) {
      for (IndexShard indexShard : indexService) {
        if (indexShard.state() == IndexShardState.STARTED
            && indexShard.routingEntry().primary()
            && indexShard.routingEntry().started()) {
          shardsToPurge.add(indexShard);
        }
      }
    }
  }
  return shardsToPurge;
}
@Test
public void testSizeParsing() throws IOException {
  IndexService indexService = createIndex("test", ImmutableSettings.settingsBuilder().build());
  XContentBuilder indexMapping = XContentFactory.jsonBuilder();
  boolean enabled = randomBoolean();
  indexMapping
      .startObject()
      .startObject("type")
      .startObject("_size")
      .field("enabled", enabled)
      .endObject()
      .endObject()
      .endObject();
  DocumentMapper documentMapper =
      indexService
          .mapperService()
          .parse("type", new CompressedString(indexMapping.string()), true);
  assertThat(documentMapper.sizeFieldMapper().enabled(), equalTo(enabled));
  assertTrue(documentMapper.sizeFieldMapper().fieldType().stored());
  documentMapper.refreshSource();
  documentMapper =
      indexService
          .mapperService()
          .parse("type", new CompressedString(documentMapper.mappingSource().string()), true);
  assertThat(documentMapper.sizeFieldMapper().enabled(), equalTo(enabled));
}
public void testAutoBoost() throws Exception {
  for (boolean boost : new boolean[] {false, true}) {
    String index = "test_" + boost;
    IndexService indexService =
        createIndex(
            index,
            client()
                .admin()
                .indices()
                .prepareCreate(index)
                .addMapping("type", "foo", "type=string" + (boost ? ",boost=2" : "")));
    client().prepareIndex(index, "type").setSource("foo", "bar").get();
    client().admin().indices().prepareRefresh(index).get();
    Query query =
        indexService
            .mapperService()
            .documentMapper("type")
            .allFieldMapper()
            .fieldType()
            .termQuery("bar", null);
    try (Searcher searcher = indexService.getShardOrNull(0).acquireSearcher("tests")) {
      query = searcher.searcher().rewrite(query);
      final Class<?> expected = boost ? AllTermQuery.class : TermQuery.class;
      assertThat(query, Matchers.instanceOf(expected));
    }
  }
}
public void testAllowNoAdditionalSettings() throws Exception {
  addQueryMapping();
  IndexService indexService = createIndex("test1", Settings.EMPTY);
  MapperService mapperService = indexService.mapperService();
  String percolatorMapper =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject(typeName)
          .startObject("properties")
          .startObject(fieldName)
          .field("type", "percolator")
          .field("index", "no")
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string();
  try {
    mapperService.merge(
        typeName,
        new CompressedXContent(percolatorMapper),
        MapperService.MergeReason.MAPPING_UPDATE,
        true);
    fail("MapperParsingException expected");
  } catch (MapperParsingException e) {
    assertThat(
        e.getMessage(),
        equalTo(
            "Mapping definition for ["
                + fieldName
                + "] has unsupported parameters: [index : no]"));
  }
}
public void testObject() throws Exception {
  IndexService indexService = createIndex("test");
  DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
  String mapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .endObject()
          .endObject()
          .string();
  DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
  assertEquals(mapping, serialize(mapper));
  Mapper update =
      parse(
          mapper,
          parser,
          XContentFactory.jsonBuilder()
              .startObject()
              .startObject("foo")
              .startObject("bar")
              .field("baz", "foo")
              .endObject()
              .endObject()
              .endObject());
  assertNotNull(update);
  // original mapping not modified
  assertEquals(mapping, serialize(mapper));
  // but we have an update
  assertEquals(
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .startObject("properties")
          .startObject("foo")
          .startObject("properties")
          .startObject("bar")
          .startObject("properties")
          .startObject("baz")
          .field("type", "text")
          .startObject("fields")
          .startObject("keyword")
          .field("type", "keyword")
          .field("ignore_above", 256)
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string(),
      serialize(update));
}
private MapperService getMapperService() {
  ClusterState clusterState = internalCluster().clusterService().state();
  ShardRouting shardRouting =
      clusterState.routingTable().index(INDEX_NAME).shard(0).getShards().get(0);
  String nodeName = clusterState.getNodes().get(shardRouting.currentNodeId()).getName();
  IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName);
  IndexService indexService = indicesService.indexService(INDEX_NAME);
  return indexService.mapperService();
}
public void testIncremental() throws Exception {
  IndexService indexService = createIndex("test");
  DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
  // Make sure that mapping updates are incremental. This is important for performance;
  // otherwise every new field introduction runs in linear time with the total number of fields.
  String mapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .startObject("properties")
          .startObject("foo")
          .field("type", "text")
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string();
  DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
  assertEquals(mapping, serialize(mapper));
  Mapper update =
      parse(
          mapper,
          parser,
          XContentFactory.jsonBuilder()
              .startObject()
              .field("foo", "bar")
              .field("bar", "baz")
              .endObject());
  assertNotNull(update);
  // original mapping not modified
  assertEquals(mapping, serialize(mapper));
  // but we have an update
  assertEquals(
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .startObject("properties")
          // foo is NOT in the update
          .startObject("bar")
          .field("type", "text")
          .startObject("fields")
          .startObject("keyword")
          .field("type", "keyword")
          .field("ignore_above", 256)
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string(),
      serialize(update));
}
public void testComplexArray() throws Exception {
  IndexService indexService = createIndex("test");
  DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
  String mapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .endObject()
          .endObject()
          .string();
  DocumentMapper mapper = parser.parse(mapping);
  assertEquals(mapping, serialize(mapper));
  Mapper update =
      parse(
          mapper,
          parser,
          XContentFactory.jsonBuilder()
              .startObject()
              .startArray("foo")
              .startObject()
              .field("bar", "baz")
              .endObject()
              .startObject()
              .field("baz", 3)
              .endObject()
              .endArray()
              .endObject());
  assertEquals(mapping, serialize(mapper));
  assertEquals(
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .startObject("properties")
          .startObject("foo")
          .startObject("properties")
          .startObject("bar")
          .field("type", "string")
          .endObject()
          .startObject("baz")
          .field("type", "long")
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string(),
      serialize(update));
}
@Override
protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
  IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
  IndexShard indexShard = indexService.getShard(request.shardId().id());
  final QueryShardContext queryShardContext = indexService.newQueryShardContext();
  queryShardContext.setTypes(request.types());
  boolean valid;
  String explanation = null;
  String error = null;
  Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");
  DefaultSearchContext searchContext =
      new DefaultSearchContext(
          0,
          new ShardSearchLocalRequest(
              request.types(), request.nowInMillis(), request.filteringAliases()),
          null,
          searcher,
          indexService,
          indexShard,
          scriptService,
          pageCacheRecycler,
          bigArrays,
          threadPool.estimatedTimeInMillisCounter(),
          parseFieldMatcher,
          SearchService.NO_TIMEOUT);
  SearchContext.setCurrent(searchContext);
  try {
    searchContext.parsedQuery(queryShardContext.toQuery(request.query()));
    searchContext.preProcess();
    valid = true;
    if (request.rewrite()) {
      explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
    } else if (request.explain()) {
      explanation = searchContext.filteredQuery().query().toString();
    }
  } catch (QueryShardException | ParsingException e) {
    valid = false;
    error = e.getDetailedMessage();
  } catch (AssertionError | IOException e) {
    valid = false;
    error = e.getMessage();
  } finally {
    searchContext.close();
    SearchContext.removeCurrent();
  }
  return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
}
public void testSizeTimestampIndexParsing() throws IOException {
  IndexService indexService = createIndex("test", Settings.settingsBuilder().build());
  String mapping =
      copyToStringFromClasspath(
          "/org/elasticsearch/index/mapper/update/default_mapping_with_disabled_root_types.json");
  DocumentMapper documentMapper =
      indexService.mapperService().parse("type", new CompressedXContent(mapping), true);
  assertThat(documentMapper.mappingSource().string(), equalTo(mapping));
  documentMapper =
      indexService
          .mapperService()
          .parse("type", new CompressedXContent(documentMapper.mappingSource().string()), true);
  assertThat(documentMapper.mappingSource().string(), equalTo(mapping));
}
public void testAllMappersNoBoost() throws Exception {
  String mapping =
      copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/noboost-mapping.json");
  IndexService index = createIndex("test");
  DocumentMapper docMapper = index.mapperService().documentMapperParser().parse(mapping);
  byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/all/test1.json");
  Document doc = docMapper.parse("test", "person", "1", new BytesArray(json)).rootDoc();
  AllField field = (AllField) doc.getField("_all");
  AllEntries allEntries = field.getAllEntries();
  assertThat(allEntries.fields().size(), equalTo(3));
  assertThat(allEntries.fields().contains("address.last.location"), equalTo(true));
  assertThat(allEntries.fields().contains("name.last"), equalTo(true));
  assertThat(allEntries.fields().contains("simple1"), equalTo(true));
  assertThat(field.fieldType().omitNorms(), equalTo(false));
}
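// The tests above load fixtures via copyToStringFromClasspath / copyToBytesFromClasspath,
// which are not shown here. A minimal plain-Java sketch of what they are assumed to do
// (the real helpers may live in a shared test utility class):
private byte[] copyToBytesFromClasspath(String path) throws IOException {
  try (InputStream is = getClass().getResourceAsStream(path);
      ByteArrayOutputStream out = new ByteArrayOutputStream()) {
    if (is == null) {
      throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
    }
    byte[] buffer = new byte[4096];
    for (int read; (read = is.read(buffer)) != -1; ) {
      out.write(buffer, 0, read);
    }
    return out.toByteArray();
  }
}

private String copyToStringFromClasspath(String path) throws IOException {
  return new String(copyToBytesFromClasspath(path), StandardCharsets.UTF_8);
}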
public void testJoinFieldSet() throws Exception {
  String parentMapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("parent_type")
          .endObject()
          .endObject()
          .string();
  String childMapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("child_type")
          .startObject("_parent")
          .field("type", "parent_type")
          .endObject()
          .endObject()
          .endObject()
          .string();
  IndexService indexService = createIndex("test");
  indexService
      .mapperService()
      .merge(
          "parent_type",
          new CompressedXContent(parentMapping),
          MergeReason.MAPPING_UPDATE,
          false);
  indexService
      .mapperService()
      .merge(
          "child_type", new CompressedXContent(childMapping), MergeReason.MAPPING_UPDATE, false);

  // Indexing parent doc:
  DocumentMapper parentDocMapper = indexService.mapperService().documentMapper("parent_type");
  ParsedDocument doc =
      parentDocMapper.parse(
          SourceToParse.source("test", "parent_type", "1122", new BytesArray("{}")));
  assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc()));
  assertEquals("1122", doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString());

  // Indexing child doc:
  DocumentMapper childDocMapper = indexService.mapperService().documentMapper("child_type");
  doc =
      childDocMapper.parse(
          SourceToParse.source("test", "child_type", "1", new BytesArray("{}")).parent("1122"));
  assertEquals(1, getNumberOfFieldWithParentPrefix(doc.rootDoc()));
  assertEquals("1122", doc.rootDoc().getBinaryValue("_parent#parent_type").utf8ToString());
}
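// A plausible sketch of the getNumberOfFieldWithParentPrefix helper assumed by the test above:
// it counts the indexed fields whose name carries the "_parent" join-field prefix.
private static int getNumberOfFieldWithParentPrefix(ParseContext.Document doc) {
  int numFieldWithParentPrefix = 0;
  for (IndexableField field : doc) {
    if (field.name().startsWith("_parent")) {
      numFieldWithParentPrefix++;
    }
  }
  return numFieldWithParentPrefix;
}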
@Before
public void configure() {
  Injector injector =
      new ModulesBuilder()
          .add(
              new CircuitBreakerModule(),
              new OperatorModule(),
              new TestModule(),
              new SysNodeExpressionModule())
          .createInjector();
  Injector shard0Injector = injector.createChildInjector(new TestShardModule(0));
  Injector shard1Injector = injector.createChildInjector(new TestShardModule(1));
  functions = injector.getInstance(Functions.class);
  IndicesService indicesService = injector.getInstance(IndicesService.class);
  indexService = injector.getInstance(IndexService.class);
  when(indexService.shardInjectorSafe(0)).thenReturn(shard0Injector);
  when(indexService.shardInjectorSafe(1)).thenReturn(shard1Injector);
  when(indexService.shardSafe(0)).thenReturn(shard0Injector.getInstance(IndexShard.class));
  when(indexService.shardSafe(1)).thenReturn(shard1Injector.getInstance(IndexShard.class));
  when(indicesService.indexServiceSafe(TEST_TABLE_NAME)).thenReturn(indexService);
  NodeSettingsService nodeSettingsService = mock(NodeSettingsService.class);
  jobContextService =
      new JobContextService(ImmutableSettings.EMPTY, testThreadPool, mock(StatsTables.class));
  ClusterService clusterService = injector.getInstance(ClusterService.class);
  operation =
      new MapSideDataCollectOperation(
          clusterService,
          ImmutableSettings.EMPTY,
          mock(TransportActionProvider.class, Answers.RETURNS_DEEP_STUBS.get()),
          injector.getInstance(BulkRetryCoordinatorPool.class),
          functions,
          injector.getInstance(ReferenceResolver.class),
          injector.getInstance(NodeSysExpression.class),
          indicesService,
          testThreadPool,
          new CollectServiceResolver(
              discoveryService,
              new SystemCollectService(
                  discoveryService,
                  functions,
                  new StatsTables(ImmutableSettings.EMPTY, nodeSettingsService))),
          mock(InformationSchemaCollectService.class),
          mock(UnassignedShardsCollectService.class));
}
public DefaultSearchContext(
    long id,
    ShardSearchRequest request,
    SearchShardTarget shardTarget,
    Engine.Searcher engineSearcher,
    IndexService indexService,
    IndexShard indexShard,
    ScriptService scriptService,
    PageCacheRecycler pageCacheRecycler,
    BigArrays bigArrays,
    Counter timeEstimateCounter) {
  this.id = id;
  this.request = request;
  this.searchType = request.searchType();
  this.shardTarget = shardTarget;
  this.engineSearcher = engineSearcher;
  this.scriptService = scriptService;
  this.pageCacheRecycler = pageCacheRecycler;
  // SearchContexts use a BigArrays that can circuit break
  this.bigArrays = bigArrays.withCircuitBreaking();
  this.dfsResult = new DfsSearchResult(id, shardTarget);
  this.queryResult = new QuerySearchResult(id, shardTarget);
  this.fetchResult = new FetchSearchResult(id, shardTarget);
  this.indexShard = indexShard;
  this.indexService = indexService;
  this.searcher = new ContextIndexSearcher(this, engineSearcher);
  // initialize the filtering alias based on the provided filters
  aliasFilter = indexService.aliasesService().aliasFilter(request.filteringAliases());
  this.timeEstimateCounter = timeEstimateCounter;
}
public PercolateContext(
    PercolateShardRequest request,
    SearchShardTarget searchShardTarget,
    IndexShard indexShard,
    IndexService indexService,
    PageCacheRecycler pageCacheRecycler,
    BigArrays bigArrays,
    ScriptService scriptService,
    Query aliasFilter,
    ParseFieldMatcher parseFieldMatcher) {
  super(parseFieldMatcher, request);
  this.indexShard = indexShard;
  this.indexService = indexService;
  this.fieldDataService = indexService.fieldData();
  this.searchShardTarget = searchShardTarget;
  this.percolateQueryRegistry = indexShard.percolateRegistry();
  this.types = new String[] {request.documentType()};
  this.pageCacheRecycler = pageCacheRecycler;
  this.bigArrays = bigArrays.withCircuitBreaking();
  this.querySearchResult = new QuerySearchResult(0, searchShardTarget);
  this.engineSearcher = indexShard.acquireSearcher("percolate");
  this.searcher = new ContextIndexSearcher(this, engineSearcher);
  this.scriptService = scriptService;
  this.numberOfShards = request.getNumberOfShards();
  this.aliasFilter = aliasFilter;
  this.startTime = request.getStartTime();
}
@Before
public void init() throws Exception {
  indexService = createIndex("test", Settings.EMPTY);
  mapperService = indexService.mapperService();
  String mapper =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .startObject("_field_names")
          .field("enabled", false) // makes testing easier
          .endObject()
          .startObject("properties")
          .startObject("field")
          .field("type", "text")
          .endObject()
          .startObject("number_field")
          .field("type", "long")
          .endObject()
          .startObject("date_field")
          .field("type", "date")
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string();
  mapperService.merge(
      "type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
}
private void applyAliases(ClusterChangedEvent event) {
  // check if aliases changed
  if (aliasesChanged(event)) {
    // go over and update aliases
    for (IndexMetaData indexMetaData : event.state().metaData()) {
      String index = indexMetaData.index();
      IndexService indexService = indicesService.indexService(index);
      if (indexService == null) {
        // we only create / update here
        continue;
      }
      IndexAliasesService indexAliasesService = indexService.aliasesService();
      indexAliasesService.setAliases(indexMetaData.getAliases());
    }
  }
}
public void testEmptyName() throws IOException {
  String mapping =
      XContentFactory.jsonBuilder()
          .startObject()
          .startObject("type")
          .startObject("properties")
          .startObject("")
          .field("type", "ip")
          .endObject()
          .endObject()
          .endObject()
          .endObject()
          .string();
  IllegalArgumentException e =
      expectThrows(
          IllegalArgumentException.class,
          () -> parser.parse("type", new CompressedXContent(mapping)));
  assertThat(e.getMessage(), containsString("name cannot be empty string"));
  // before 5.x
  Version oldVersion =
      VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
  Settings oldIndexSettings =
      Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
  indexService = createIndex("test_old", oldIndexSettings);
  parser = indexService.mapperService().documentMapperParser();
  DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping));
  assertEquals(mapping, defaultMapper.mappingSource().string());
}
public void testIndexIntoDefaultMapping() throws Throwable {
  // 1. test implicit index creation
  try {
    client()
        .prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1")
        .setSource("{") // the source is irrelevant: the request is rejected before it is parsed
        .execute()
        .get();
    fail();
  } catch (Throwable t) {
    if (t instanceof ExecutionException) {
      t = ((ExecutionException) t).getCause();
    }
    final Throwable throwable = ExceptionsHelper.unwrapCause(t);
    if (throwable instanceof IllegalArgumentException) {
      assertEquals(
          "It is forbidden to index into the default mapping [_default_]",
          throwable.getMessage());
    } else {
      throw t;
    }
  }
  // 2. already existing index
  IndexService indexService = createIndex("index2");
  try {
    client()
        .prepareIndex("index2", MapperService.DEFAULT_MAPPING, "2")
        .setSource()
        .execute()
        .get();
    fail();
  } catch (Throwable t) {
    if (t instanceof ExecutionException) {
      t = ((ExecutionException) t).getCause();
    }
    final Throwable throwable = ExceptionsHelper.unwrapCause(t);
    if (throwable instanceof IllegalArgumentException) {
      assertEquals(
          "It is forbidden to index into the default mapping [_default_]",
          throwable.getMessage());
    } else {
      throw t;
    }
  }
  assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING));
}
private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException {
  XContentParser sourceParser =
      PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE
          .xContent()
          .createParser(actual.bytes, actual.offset, actual.length);
  QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser);
  assertThat(qsc.parseInnerQueryBuilder().get(), equalTo(expected));
}
private void applyCleanedIndices(final ClusterChangedEvent event) {
  // handle closed indices, since they are not allocated on a node once they are closed,
  // so applyDeletedIndices might not take them into account
  for (IndexService indexService : indicesService) {
    String index = indexService.index().getName();
    IndexMetaData indexMetaData = event.state().metaData().index(index);
    if (indexMetaData != null && indexMetaData.state() == IndexMetaData.State.CLOSE) {
      for (Integer shardId : indexService.shardIds()) {
        logger.debug("[{}][{}] removing shard (index is closed)", index, shardId);
        try {
          indexService.removeShard(shardId, "removing shard (index is closed)");
        } catch (Throwable e) {
          logger.warn("[{}] failed to remove shard (index is closed)", e, index);
        }
      }
    }
  }
  for (IndexService indexService : indicesService) {
    String index = indexService.index().getName();
    if (indexService.shardIds().isEmpty()) {
      if (logger.isDebugEnabled()) {
        logger.debug("[{}] cleaning index (no shards allocated)", index);
      }
      // clean the index
      removeIndex(index, "removing index (no shards allocated)");
    }
  }
}
@Override
public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
  if (shardId != null) {
    final IndexShard shard = indexService.getShardOrNull(shardId.id());
    if (shard != null) {
      shard.fieldData().onRemoval(shardId, fieldName, wasEvicted, sizeInBytes);
    }
  }
}
@Override
public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
  if (shardId != null) {
    final IndexShard shard = indexService.getShardOrNull(shardId.id());
    if (shard != null) {
      shard.fieldData().onCache(shardId, fieldName, ramUsage);
    }
  }
}
public void testTypes() throws Exception {
  IndexService indexService1 = createIndex("index1");
  MapperService mapperService = indexService1.mapperService();
  assertEquals(Collections.emptySet(), mapperService.types());
  mapperService.merge("type1", new CompressedXContent("{\"type1\":{}}"), true, false);
  assertNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING));
  assertEquals(Collections.singleton("type1"), mapperService.types());
  mapperService.merge(
      MapperService.DEFAULT_MAPPING, new CompressedXContent("{\"_default_\":{}}"), true, false);
  assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING));
  assertEquals(Collections.singleton("type1"), mapperService.types());
  mapperService.merge("type2", new CompressedXContent("{\"type2\":{}}"), true, false);
  assertNotNull(mapperService.documentMapper(MapperService.DEFAULT_MAPPING));
  assertEquals(new HashSet<>(Arrays.asList("type1", "type2")), mapperService.types());
}