@Test
public void testIpMultiField() throws Exception {
  assertAcked(
      client()
          .admin()
          .indices()
          .prepareCreate("my-index")
          .addMapping("my-type", createMappingSource("ip")));
  GetMappingsResponse getMappingsResponse =
      client().admin().indices().prepareGetMappings("my-index").get();
  MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
  assertThat(mappingMetaData, not(nullValue()));
  Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
  Map aField = (Map) XContentMapValues.extractValue("properties.a", mappingSource);
  assertThat(aField.size(), equalTo(2));
  assertThat(aField.get("type").toString(), equalTo("ip"));
  assertThat(aField.get("fields"), notNullValue());
  Map bField = (Map) XContentMapValues.extractValue("properties.a.fields.b", mappingSource);
  assertThat(bField.size(), equalTo(2));
  assertThat(bField.get("type").toString(), equalTo("string"));
  assertThat(bField.get("index").toString(), equalTo("not_analyzed"));
  client()
      .prepareIndex("my-index", "my-type", "1")
      .setSource("a", "127.0.0.1")
      .setRefresh(true)
      .get();
  CountResponse countResponse =
      client().prepareCount("my-index").setQuery(matchQuery("a.b", "127.0.0.1")).get();
  assertThat(countResponse.getCount(), equalTo(1L));
}
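// Hypothetical sketch of the createMappingSource(...) helper referenced above; the real body
// is not part of this section, so this is inferred from the test's assertions only (an "ip"
// field "a" with a not_analyzed string sub-field "b"):
private XContentBuilder createMappingSource(String fieldType) throws IOException {
  return XContentFactory.jsonBuilder()
      .startObject()
      .startObject("my-type")
      .startObject("properties")
      .startObject("a")
      .field("type", fieldType)
      .startObject("fields")
      .startObject("b")
      .field("type", "string")
      .field("index", "not_analyzed")
      .endObject()
      .endObject()
      .endObject()
      .endObject()
      .endObject()
      .endObject();
}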
public void process(
    @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) {
  if (mappingMd != null) {
    // might as well check for routing here
    if (mappingMd.routing().required() && routing == null) {
      throw new RoutingMissingException(concreteIndex, type, id);
    }
    if (parent != null && !mappingMd.hasParentField()) {
      throw new IllegalArgumentException(
          "Can't specify parent if no parent field has been configured");
    }
  } else {
    if (parent != null) {
      throw new IllegalArgumentException(
          "Can't specify parent if no parent field has been configured");
    }
  }
  // generate id if not already provided and id generation is allowed
  if (allowIdGeneration && id == null) {
    assert autoGeneratedTimestamp == -1;
    autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
    id(UUIDs.base64UUID());
  }
}
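// A minimal client-side sketch (not from the original sources) of how the routing check in
// process(...) above surfaces: a type that declares "_routing": {"required": true} rejects an
// index request without routing. Index/type names are placeholders, and the exact addMapping
// overload varies across the ES versions shown in this section.
@Test
public void testMissingRoutingIsRejected() throws Exception {
  assertAcked(
      client()
          .admin()
          .indices()
          .prepareCreate("routed-index")
          .addMapping("routed-type", "{\"routed-type\":{\"_routing\":{\"required\":true}}}"));
  try {
    client().prepareIndex("routed-index", "routed-type", "1").setSource("f", "v").get();
    fail("expected RoutingMissingException");
  } catch (RoutingMissingException e) {
    // expected: the mapping requires routing but none was provided
  }
}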
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
  if (cancellableThreads.isCancelled() == false) { // no need to run this if recovery is canceled
    IndexMetaData indexMetaData =
        clusterService.state().metaData().getIndices().get(indexService.index().getName());
    ImmutableOpenMap<String, MappingMetaData> metaDataMappings = null;
    if (indexMetaData != null) {
      metaDataMappings = indexMetaData.getMappings();
    }
    // the default mapping should not be sent back, it can only be updated by the put mapping
    // API, and since it's a full in-place replace, we don't want to override a potential update
    // coming into it
    for (DocumentMapper documentMapper : indexService.mapperService().docMappers(false)) {
      MappingMetaData mappingMetaData =
          metaDataMappings == null ? null : metaDataMappings.get(documentMapper.type());
      if (mappingMetaData == null
          || !documentMapper.refreshSource().equals(mappingMetaData.source())) {
        // not on the master yet in the right form
        documentMappersToUpdate.add(documentMapper);
      }
    }
  }
  return currentState;
}
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(
    ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
  final IndexRequest request = shardRequest.request;
  // validate, if routing is required, that we got routing
  IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
  MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
  if (mappingMd != null && mappingMd.routing().required()) {
    if (request.routing() == null) {
      throw new RoutingMissingException(
          shardRequest.shardId.getIndex(), request.type(), request.id());
    }
  }
  IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
  IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
  final WriteResult<IndexResponse> result =
      executeIndexRequestOnPrimary(null, request, indexShard);
  final IndexResponse response = result.response;
  final Translog.Location location = result.location;
  processAfter(request.refresh(), indexShard, location);
  return new Tuple<>(response, shardRequest.request);
}
private static void assertMappingsHaveField(
    GetMappingsResponse mappings, String index, String type, String field) throws IOException {
  ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.getMappings().get(index);
  assertNotNull(indexMappings);
  MappingMetaData typeMappings = indexMappings.get(type);
  assertNotNull(typeMappings);
  Map<String, Object> typeMappingsMap = typeMappings.getSourceAsMap();
  Map<String, Object> properties = (Map<String, Object>) typeMappingsMap.get("properties");
  assertTrue(
      "Could not find [" + field + "] in " + typeMappingsMap.toString(),
      properties.containsKey(field));
}
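// Hedged usage sketch (index/type/field names are placeholders, not from the original
// sources): fetch the mappings for an index and assert that a field is present.
@Test
public void testTitleFieldIsMapped() throws Exception {
  GetMappingsResponse mappings =
      client().admin().indices().prepareGetMappings("my-index").get();
  assertMappingsHaveField(mappings, "my-index", "my-type", "title");
}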
private WriteResult shardIndexOperation(
    BulkShardRequest request,
    IndexRequest indexRequest,
    ClusterState clusterState,
    IndexShard indexShard,
    boolean processed)
    throws Throwable {
  // validate, if routing is required, that we got routing
  MappingMetaData mappingMd =
      clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
  if (mappingMd != null && mappingMd.routing().required()) {
    if (indexRequest.routing() == null) {
      throw new RoutingMissingException(request.index(), indexRequest.type(), indexRequest.id());
    }
  }
  if (!processed) {
    indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index());
  }
  return executeIndexRequestOnPrimary(request, indexRequest, indexShard);
}
/**
 * Until we can verify that all instances have moved over to our new mapping, we need to
 * hand-check all index mappings to make sure they have doc.associations.assoc_index as a doc
 * value.
 *
 * @param communityIdStrs the community ids whose document indexes should be checked
 * @return true if every checked index maps assoc_index as a doc value, false otherwise
 */
@SuppressWarnings("unchecked")
private static boolean validateAssociationMapping(String[] communityIdStrs) {
  // get all index mappings associated with these commids
  String[] mappings = new String[communityIdStrs.length];
  StringBuilder sb = new StringBuilder(", ");
  for (int i = 0; i < communityIdStrs.length; i++) {
    String s = communityIdStrs[i];
    mappings[i] = "doc_" + s + "*";
    sb.append("doc_").append(s).append("*, ");
  }
  ElasticSearchManager esm = ElasticSearchManager.getIndex(sb.substring(2, sb.length()));
  GetMappingsResponse response =
      esm.getRawClient().admin().indices().prepareGetMappings(mappings).get();
  for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> mapping :
      response.getMappings()) {
    ImmutableOpenMap<String, MappingMetaData> mappingVal = mapping.value;
    MappingMetaData mapping_meta = mappingVal.get("document_index");
    try {
      Map<String, Object> map = mapping_meta.getSourceAsMap();
      Map<String, Object> props = (Map<String, Object>) map.get("properties");
      Map<String, Object> assocs = (Map<String, Object>) props.get(DocumentPojo.associations_);
      Map<String, Object> assocs_props = (Map<String, Object>) assocs.get("properties");
      Map<String, Object> assoc_index =
          (Map<String, Object>) assocs_props.get(AssociationPojo.assoc_index_);
      if (!assoc_index.containsKey("doc_values") || !((Boolean) assoc_index.get("doc_values"))) {
        // doc_values is absent from the mapping or was false
        return false;
      }
    } catch (Exception ex) {
      // failed somehow
      return false;
    }
  }
  // if we fell through, all the checked indexes had the doc_values field set
  return true;
}
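// Hedged sketch (not from the original sources) of a mapping fragment that would pass the
// doc-values check above. It reuses the same field-name constants the validator reads; the
// not_analyzed setting is an assumption, since ES 1.x only supports doc values on
// not_analyzed string fields.
private static XContentBuilder passingAssociationMapping() throws IOException {
  return XContentFactory.jsonBuilder()
      .startObject()
      .startObject("document_index")
      .startObject("properties")
      .startObject(DocumentPojo.associations_)
      .startObject("properties")
      .startObject(AssociationPojo.assoc_index_)
      .field("type", "string")
      .field("index", "not_analyzed")
      .field("doc_values", true)
      .endObject()
      .endObject()
      .endObject()
      .endObject()
      .endObject()
      .endObject();
}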
@NotNull
protected EsMapping readMapping() {
  try {
    try {
      IndexMetaData indexMetaData = getIndexMetaData();
      if (indexMetaData != null) {
        MappingMetaData metaData = indexMetaData.mapping(getIndexName());
        if (metaData != null) {
          byte[] mappingSource = metaData.source().uncompressed();
          return new EsMapping(getIndexName(), mappingSource);
        }
      }
      mappingCreated = false;
      return new EsMapping(getIndexName());
    } catch (IndexMissingException e) {
      mappingCreated = false;
      return new EsMapping(getIndexName());
    }
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
private void executeBulk(
    final BulkRequest bulkRequest,
    final long startTime,
    final ActionListener<BulkResponse> listener) {
  ClusterState clusterState = clusterService.state();
  for (ActionRequest request : bulkRequest.requests) {
    if (request instanceof IndexRequest) {
      IndexRequest indexRequest = (IndexRequest) request;
      indexRequest.routing(
          clusterState
              .metaData()
              .resolveIndexRouting(indexRequest.routing(), indexRequest.index()));
      indexRequest.index(clusterState.metaData().concreteIndex(indexRequest.index()));
      if (allowIdGeneration) {
        if (indexRequest.id() == null) {
          indexRequest.id(UUID.randomBase64UUID());
          // since we generate the id, change it to CREATE
          indexRequest.opType(IndexRequest.OpType.CREATE);
        }
      }
    } else if (request instanceof DeleteRequest) {
      DeleteRequest deleteRequest = (DeleteRequest) request;
      deleteRequest.index(clusterState.metaData().concreteIndex(deleteRequest.index()));
    }
  }
  final BulkItemResponse[] responses = new BulkItemResponse[bulkRequest.requests.size()];
  // first, go over all the requests and create a ShardId -> Operations mapping
  Map<ShardId, List<BulkItemRequest>> requestsByShard = Maps.newHashMap();
  for (int i = 0; i < bulkRequest.requests.size(); i++) {
    ActionRequest request = bulkRequest.requests.get(i);
    if (request instanceof IndexRequest) {
      IndexRequest indexRequest = (IndexRequest) request;
      // handle routing
      MappingMetaData mappingMd =
          clusterState.metaData().index(indexRequest.index()).mapping(indexRequest.type());
      if (mappingMd != null) {
        try {
          indexRequest.processRouting(mappingMd);
        } catch (ElasticSearchException e) {
          responses[i] =
              new BulkItemResponse(
                  i,
                  indexRequest.opType().toString().toLowerCase(),
                  new BulkItemResponse.Failure(
                      indexRequest.index(),
                      indexRequest.type(),
                      indexRequest.id(),
                      e.getDetailedMessage()));
          continue;
        }
      }
      ShardId shardId =
          clusterService
              .operationRouting()
              .indexShards(
                  clusterState,
                  indexRequest.index(),
                  indexRequest.type(),
                  indexRequest.id(),
                  indexRequest.routing())
              .shardId();
      List<BulkItemRequest> list = requestsByShard.get(shardId);
      if (list == null) {
        list = Lists.newArrayList();
        requestsByShard.put(shardId, list);
      }
      list.add(new BulkItemRequest(i, request));
    } else if (request instanceof DeleteRequest) {
      DeleteRequest deleteRequest = (DeleteRequest) request;
      MappingMetaData mappingMd =
          clusterState.metaData().index(deleteRequest.index()).mapping(deleteRequest.type());
      if (mappingMd != null && mappingMd.routing().required() && deleteRequest.routing() == null) {
        // if routing is required, and no routing on the delete request, we need to broadcast
        // it....
        GroupShardsIterator groupShards =
            clusterService
                .operationRouting()
                .broadcastDeleteShards(clusterState, deleteRequest.index());
        for (ShardIterator shardIt : groupShards) {
          List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
          if (list == null) {
            list = Lists.newArrayList();
            requestsByShard.put(shardIt.shardId(), list);
          }
          list.add(new BulkItemRequest(i, request));
        }
      } else {
        ShardId shardId =
            clusterService
                .operationRouting()
                .deleteShards(
                    clusterState,
                    deleteRequest.index(),
                    deleteRequest.type(),
                    deleteRequest.id(),
                    deleteRequest.routing())
                .shardId();
        List<BulkItemRequest> list = requestsByShard.get(shardId);
        if (list == null) {
          list = Lists.newArrayList();
          requestsByShard.put(shardId, list);
        }
        list.add(new BulkItemRequest(i, request));
      }
    }
  }
  if (requestsByShard.isEmpty()) {
    listener.onResponse(new BulkResponse(responses, System.currentTimeMillis() - startTime));
    return;
  }
  final AtomicInteger counter = new AtomicInteger(requestsByShard.size());
  for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
    final ShardId shardId = entry.getKey();
    final List<BulkItemRequest> requests = entry.getValue();
    BulkShardRequest bulkShardRequest =
        new BulkShardRequest(
            shardId.index().name(),
            shardId.id(),
            bulkRequest.refresh(),
            requests.toArray(new BulkItemRequest[requests.size()]));
    bulkShardRequest.replicationType(bulkRequest.replicationType());
    bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
    shardBulkAction.execute(
        bulkShardRequest,
        new ActionListener<BulkShardResponse>() {
          @Override
          public void onResponse(BulkShardResponse bulkShardResponse) {
            synchronized (responses) {
              for (BulkItemResponse bulkItemResponse : bulkShardResponse.responses()) {
                responses[bulkItemResponse.itemId()] = bulkItemResponse;
              }
            }
            if (counter.decrementAndGet() == 0) {
              finishHim();
            }
          }

          @Override
          public void onFailure(Throwable e) {
            // create failures for all relevant requests
            String message = ExceptionsHelper.detailedMessage(e);
            synchronized (responses) {
              for (BulkItemRequest request : requests) {
                if (request.request() instanceof IndexRequest) {
                  IndexRequest indexRequest = (IndexRequest) request.request();
                  responses[request.id()] =
                      new BulkItemResponse(
                          request.id(),
                          indexRequest.opType().toString().toLowerCase(),
                          new BulkItemResponse.Failure(
                              indexRequest.index(),
                              indexRequest.type(),
                              indexRequest.id(),
                              message));
                } else if (request.request() instanceof DeleteRequest) {
                  DeleteRequest deleteRequest = (DeleteRequest) request.request();
                  responses[request.id()] =
                      new BulkItemResponse(
                          request.id(),
                          "delete",
                          new BulkItemResponse.Failure(
                              deleteRequest.index(),
                              deleteRequest.type(),
                              deleteRequest.id(),
                              message));
                }
              }
            }
            if (counter.decrementAndGet() == 0) {
              finishHim();
            }
          }

          private void finishHim() {
            listener.onResponse(
                new BulkResponse(responses, System.currentTimeMillis() - startTime));
          }
        });
  }
}
@Override
protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(
    ClusterState clusterState, PrimaryOperationRequest shardRequest) {
  final IndexRequest request = shardRequest.request;
  // validate, if routing is required, that we got routing
  IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
  MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
  if (mappingMd != null && mappingMd.routing().required()) {
    if (request.routing() == null) {
      throw new RoutingMissingException(
          shardRequest.shardId.getIndex(), request.type(), request.id());
    }
  }
  IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
  IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
  SourceToParse sourceToParse =
      SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source())
          .type(request.type())
          .id(request.id())
          .routing(request.routing())
          .parent(request.parent())
          .timestamp(request.timestamp())
          .ttl(request.ttl());
  long version;
  boolean created;
  Engine.IndexingOperation op;
  if (request.opType() == IndexRequest.OpType.INDEX) {
    Engine.Index index =
        indexShard.prepareIndex(
            sourceToParse,
            request.version(),
            request.versionType(),
            Engine.Operation.Origin.PRIMARY,
            request.canHaveDuplicates());
    if (index.parsedDoc().mappingsModified()) {
      mappingUpdatedAction.updateMappingOnMaster(
          shardRequest.shardId.getIndex(), index.docMapper(), indexService.indexUUID());
    }
    indexShard.index(index);
    version = index.version();
    op = index;
    created = index.created();
  } else {
    Engine.Create create =
        indexShard.prepareCreate(
            sourceToParse,
            request.version(),
            request.versionType(),
            Engine.Operation.Origin.PRIMARY,
            request.canHaveDuplicates(),
            request.autoGeneratedId());
    if (create.parsedDoc().mappingsModified()) {
      mappingUpdatedAction.updateMappingOnMaster(
          shardRequest.shardId.getIndex(), create.docMapper(), indexService.indexUUID());
    }
    indexShard.create(create);
    version = create.version();
    op = create;
    created = true;
  }
  if (request.refresh()) {
    try {
      indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
    } catch (Throwable e) {
      // ignore
    }
  }
  // update the version on the request, so it will be used for the replicas
  request.version(version);
  request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
  assert request.versionType().validateVersionForWrites(request.version());
  IndexResponse response =
      new IndexResponse(
          shardRequest.shardId.getIndex(), request.type(), request.id(), version, created);
  return new PrimaryResponse<>(shardRequest.request, response, op);
}
private void applyMappings(ClusterChangedEvent event) {
  // go over and update mappings
  for (IndexMetaData indexMetaData : event.state().metaData()) {
    if (!indicesService.hasIndex(indexMetaData.index())) {
      // we only create / update here
      continue;
    }
    List<String> typesToRefresh = Lists.newArrayList();
    String index = indexMetaData.index();
    IndexService indexService = indicesService.indexService(index);
    if (indexService == null) {
      // got deleted on us, ignore (closing the node)
      return;
    }
    try {
      MapperService mapperService = indexService.mapperService();
      // first, go over and update the _default_ mapping (if it exists)
      if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
        boolean requireRefresh =
            processMapping(
                index,
                mapperService,
                MapperService.DEFAULT_MAPPING,
                indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
        if (requireRefresh) {
          typesToRefresh.add(MapperService.DEFAULT_MAPPING);
        }
      }
      // go over and add the relevant mappings (or update them)
      for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
        MappingMetaData mappingMd = cursor.value;
        String mappingType = mappingMd.type();
        CompressedXContent mappingSource = mappingMd.source();
        if (mappingType.equals(MapperService.DEFAULT_MAPPING)) {
          // we processed _default_ first
          continue;
        }
        boolean requireRefresh = processMapping(index, mapperService, mappingType, mappingSource);
        if (requireRefresh) {
          typesToRefresh.add(mappingType);
        }
      }
      if (!typesToRefresh.isEmpty() && sendRefreshMapping) {
        nodeMappingRefreshAction.nodeMappingRefresh(
            event.state(),
            new NodeMappingRefreshAction.NodeMappingRefreshRequest(
                index,
                indexMetaData.indexUUID(),
                typesToRefresh.toArray(new String[typesToRefresh.size()]),
                event.state().nodes().localNodeId()));
      }
    } catch (Throwable t) {
      // if we failed the mappings anywhere, we need to fail the shards for this index. Note that
      // we safeguard by processing the mappings on the master, or on the node the mapping was
      // introduced on, so this failure typically means a wrong node-level configuration or
      // something similar
      for (IndexShard indexShard : indexService) {
        ShardRouting shardRouting = indexShard.routingEntry();
        failAndRemoveShard(shardRouting, indexService, true, "failed to update mappings", t);
      }
    }
  }
}
public void process(
    MetaData metaData,
    String aliasOrIndex,
    @Nullable MappingMetaData mappingMd,
    boolean allowIdGeneration)
    throws ElasticsearchException {
  // resolve the routing if needed
  routing(metaData.resolveIndexRouting(routing, aliasOrIndex));
  // resolve timestamp if provided externally
  if (timestamp != null) {
    timestamp =
        MappingMetaData.Timestamp.parseStringTimestamp(
            timestamp,
            mappingMd != null
                ? mappingMd.timestamp().dateTimeFormatter()
                : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
  }
  // extract values if needed
  if (mappingMd != null) {
    MappingMetaData.ParseContext parseContext =
        mappingMd.createParseContext(id, routing, timestamp);
    if (parseContext.shouldParse()) {
      XContentParser parser = null;
      try {
        parser = XContentHelper.createParser(source);
        mappingMd.parse(parser, parseContext);
        if (parseContext.shouldParseId()) {
          id = parseContext.id();
        }
        if (parseContext.shouldParseRouting()) {
          routing = parseContext.routing();
        }
        if (parseContext.shouldParseTimestamp()) {
          timestamp = parseContext.timestamp();
          timestamp =
              MappingMetaData.Timestamp.parseStringTimestamp(
                  timestamp, mappingMd.timestamp().dateTimeFormatter());
        }
      } catch (Exception e) {
        throw new ElasticsearchParseException(
            "failed to parse doc to extract routing/timestamp", e);
      } finally {
        if (parser != null) {
          parser.close();
        }
      }
    }
    // might as well check for routing here
    if (mappingMd.routing().required() && routing == null) {
      throw new RoutingMissingException(index, type, id);
    }
    if (parent != null && !mappingMd.hasParentField()) {
      throw new ElasticsearchIllegalArgumentException(
          "Can't specify parent if no parent field has been configured");
    }
  } else {
    if (parent != null) {
      throw new ElasticsearchIllegalArgumentException(
          "Can't specify parent if no parent field has been configured");
    }
  }
  // generate id if not already provided and id generation is allowed
  if (allowIdGeneration) {
    if (id == null) {
      id(Strings.randomBase64UUID());
      // since we generate the id, change it to CREATE
      opType(IndexRequest.OpType.CREATE);
    }
  }
  // generate timestamp if not provided, we always have one after this stage...
  if (timestamp == null) {
    timestamp = Long.toString(System.currentTimeMillis());
  }
}
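// A minimal sketch (not from the original sources) of the id-generation branch of process(...)
// above: with no id set and id generation allowed, a random base64 id is assigned and the op
// type flips to CREATE. The MetaData argument is assumed to come from the current cluster
// state; index/type/field names are placeholders.
void illustrateIdGeneration(MetaData metaData) {
  IndexRequest request = new IndexRequest("my-index", "my-type").source("f", "v");
  request.process(metaData, "my-index", null, true);
  assert request.id() != null;
  assert request.opType() == IndexRequest.OpType.CREATE;
  assert request.timestamp() != null; // a timestamp is always set once process(...) returns
}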
@SuppressWarnings("unchecked") @Test public void updateIncludeExclude() throws Exception { assertAcked( prepareCreate("test") .addMapping( "type", jsonBuilder() .startObject() .startObject("type") .startObject("properties") .startObject("normal") .field("type", "long") .endObject() .startObject("exclude") .field("type", "long") .endObject() .startObject("include") .field("type", "long") .endObject() .endObject() .endObject() .endObject())); ensureGreen(); // make sure that replicas are initialized so the refresh command will work them // too logger.info("Index doc"); index( "test", "type", "1", JsonXContent.contentBuilder() .startObject() .field("normal", 1) .field("exclude", 1) .field("include", 1) .endObject()); refresh(); // commit it for later testing. logger.info("Adding exclude settings"); PutMappingResponse putResponse = client() .admin() .indices() .preparePutMapping("test") .setType("type") .setSource( JsonXContent.contentBuilder() .startObject() .startObject("type") .startObject("_source") .startArray("excludes") .value("exclude") .endArray() .endObject() .endObject()) .get(); assertTrue(putResponse.isAcknowledged()); // changed mapping doesn't affect indexed documents (checking backward compatibility) GetResponse getResponse = client().prepareGet("test", "type", "1").setRealtime(false).get(); assertThat(getResponse.getSource(), hasKey("normal")); assertThat(getResponse.getSource(), hasKey("exclude")); assertThat(getResponse.getSource(), hasKey("include")); logger.info("Index doc again"); index( "test", "type", "1", JsonXContent.contentBuilder() .startObject() .field("normal", 2) .field("exclude", 1) .field("include", 2) .endObject()); // but do affect newly indexed docs getResponse = get("test", "type", "1"); assertThat(getResponse.getSource(), hasKey("normal")); assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); logger.info("Changing mapping to includes"); putResponse = client() .admin() .indices() .preparePutMapping("test") .setType("type") .setSource( JsonXContent.contentBuilder() .startObject() .startObject("type") .startObject("_source") .startArray("excludes") .endArray() .startArray("includes") .value("include") .endArray() .endObject() .endObject()) .get(); assertTrue(putResponse.isAcknowledged()); GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); MappingMetaData typeMapping = getMappingsResponse.getMappings().get("test").get("type"); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes")); ArrayList<String> includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes"); assertThat(includes, contains("include")); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); assertThat( (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"), emptyIterable()); logger.info("Indexing doc yet again"); index( "test", "type", "1", JsonXContent.contentBuilder() .startObject() .field("normal", 3) .field("exclude", 3) .field("include", 3) .endObject()); getResponse = get("test", "type", "1"); assertThat(getResponse.getSource(), not(hasKey("normal"))); assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); logger.info("Adding excludes, but keep includes"); putResponse = client() .admin() .indices() .preparePutMapping("test") 
.setType("type") .setSource( JsonXContent.contentBuilder() .startObject() .startObject("type") .startObject("_source") .startArray("excludes") .value("*.excludes") .endArray() .endObject() .endObject()) .get(); assertTrue(putResponse.isAcknowledged()); getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); typeMapping = getMappingsResponse.getMappings().get("test").get("type"); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes")); includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes"); assertThat(includes, contains("include")); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); ArrayList<String> excludes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"); assertThat(excludes, contains("*.excludes")); }
@Test
public void testMultiFields() throws Exception {
  assertAcked(
      client()
          .admin()
          .indices()
          .prepareCreate("my-index")
          .addMapping("my-type", createTypeSource()));
  GetMappingsResponse getMappingsResponse =
      client().admin().indices().prepareGetMappings("my-index").get();
  MappingMetaData mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
  assertThat(mappingMetaData, not(nullValue()));
  Map<String, Object> mappingSource = mappingMetaData.sourceAsMap();
  Map titleFields = (Map) XContentMapValues.extractValue("properties.title.fields", mappingSource);
  assertThat(titleFields.size(), equalTo(1));
  assertThat(titleFields.get("not_analyzed"), notNullValue());
  assertThat(
      ((Map) titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
  client()
      .prepareIndex("my-index", "my-type", "1")
      .setSource("title", "Multi fields")
      .setRefresh(true)
      .get();
  SearchResponse searchResponse =
      client().prepareSearch("my-index").setQuery(matchQuery("title", "multi")).get();
  assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
  searchResponse =
      client()
          .prepareSearch("my-index")
          .setQuery(matchQuery("title.not_analyzed", "Multi fields"))
          .get();
  assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
  assertAcked(
      client()
          .admin()
          .indices()
          .preparePutMapping("my-index")
          .setType("my-type")
          .setSource(createPutMappingSource())
          // if updated with a multi-field type, we need to ignore failures
          .setIgnoreConflicts(true));
  getMappingsResponse = client().admin().indices().prepareGetMappings("my-index").get();
  mappingMetaData = getMappingsResponse.mappings().get("my-index").get("my-type");
  assertThat(mappingMetaData, not(nullValue()));
  mappingSource = mappingMetaData.sourceAsMap();
  assertThat(
      ((Map) XContentMapValues.extractValue("properties.title", mappingSource)).size(),
      equalTo(2));
  titleFields = (Map) XContentMapValues.extractValue("properties.title.fields", mappingSource);
  assertThat(titleFields.size(), equalTo(2));
  assertThat(titleFields.get("not_analyzed"), notNullValue());
  assertThat(
      ((Map) titleFields.get("not_analyzed")).get("index").toString(), equalTo("not_analyzed"));
  assertThat(titleFields.get("uncased"), notNullValue());
  assertThat(((Map) titleFields.get("uncased")).get("analyzer").toString(), equalTo("whitespace"));
  client()
      .prepareIndex("my-index", "my-type", "1")
      .setSource("title", "Multi fields")
      .setRefresh(true)
      .get();
  searchResponse =
      client().prepareSearch("my-index").setQuery(matchQuery("title.uncased", "Multi")).get();
  assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
}
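// Hypothetical sketch of the createTypeSource() helper referenced above; the real body is not
// part of this section, so this is inferred from the test's assertions (a string "title" field
// with a single not_analyzed sub-field). The companion createPutMappingSource() would then add
// the "uncased" sub-field with the whitespace analyzer.
private XContentBuilder createTypeSource() throws IOException {
  return XContentFactory.jsonBuilder()
      .startObject()
      .startObject("my-type")
      .startObject("properties")
      .startObject("title")
      .field("type", "string")
      .startObject("fields")
      .startObject("not_analyzed")
      .field("type", "string")
      .field("index", "not_analyzed")
      .endObject()
      .endObject()
      .endObject()
      .endObject()
      .endObject()
      .endObject();
}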
public void testOldPercolatorIndex() throws Exception {
  setupNode();
  // verify cluster state:
  ClusterState state = client().admin().cluster().prepareState().get().getState();
  assertThat(state.metaData().indices().size(), equalTo(1));
  assertThat(state.metaData().indices().get(INDEX_NAME), notNullValue());
  assertThat(
      state.metaData().indices().get(INDEX_NAME).getCreationVersion(), equalTo(Version.V_2_0_0));
  assertThat(
      state.metaData().indices().get(INDEX_NAME).getUpgradedVersion(), equalTo(Version.CURRENT));
  assertThat(state.metaData().indices().get(INDEX_NAME).getMappings().size(), equalTo(2));
  assertThat(
      state.metaData().indices().get(INDEX_NAME).getMappings().get(".percolator"),
      notNullValue());
  // important: verify that the query field in the .percolator mapping is of type object
  // (from 3.0.0 this is of type percolator)
  MappingMetaData mappingMetaData =
      state.metaData().indices().get(INDEX_NAME).getMappings().get(".percolator");
  assertThat(
      XContentMapValues.extractValue("properties.query.type", mappingMetaData.sourceAsMap()),
      equalTo("object"));
  assertThat(
      state.metaData().indices().get(INDEX_NAME).getMappings().get("message"), notNullValue());

  // verify existing percolator queries:
  SearchResponse searchResponse =
      client()
          .prepareSearch(INDEX_NAME)
          .setTypes(".percolator")
          .addSort("_id", SortOrder.ASC)
          .get();
  assertThat(searchResponse.getHits().getTotalHits(), equalTo(3L));
  assertThat(searchResponse.getHits().getAt(0).id(), equalTo("1"));
  assertThat(searchResponse.getHits().getAt(1).id(), equalTo("2"));
  assertThat(searchResponse.getHits().getAt(2).id(), equalTo("3"));

  // verify percolate response
  PercolateResponse percolateResponse =
      client()
          .preparePercolate()
          .setIndices(INDEX_NAME)
          .setDocumentType("message")
          .setPercolateDoc(
              new PercolateSourceBuilder.DocBuilder()
                  .setDoc("message", "the quick brown fox jumps over the lazy dog"))
          .get();
  assertThat(percolateResponse.getCount(), equalTo(2L));
  assertThat(percolateResponse.getMatches().length, equalTo(2));
  assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1"));
  assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2"));

  // add an extra query and verify the results
  client()
      .prepareIndex(INDEX_NAME, ".percolator", "4")
      .setSource(
          jsonBuilder()
              .startObject()
              .field("query", matchQuery("message", "fox jumps"))
              .endObject())
      .get();
  refresh();
  percolateResponse =
      client()
          .preparePercolate()
          .setIndices(INDEX_NAME)
          .setDocumentType("message")
          .setPercolateDoc(
              new PercolateSourceBuilder.DocBuilder()
                  .setDoc("message", "the quick brown fox jumps over the lazy dog"))
          .get();
  assertThat(percolateResponse.getCount(), equalTo(3L));
  assertThat(percolateResponse.getMatches().length, equalTo(3));
  assertThat(percolateResponse.getMatches()[0].getId().string(), equalTo("1"));
  assertThat(percolateResponse.getMatches()[1].getId().string(), equalTo("2"));
  assertThat(percolateResponse.getMatches()[2].getId().string(), equalTo("4"));
}
@Override
protected PrimaryResponse<IngestShardResponse, IngestShardRequest> shardOperationOnPrimary(
    ClusterState clusterState, PrimaryOperationRequest shardRequest) {
  final IngestShardRequest request = shardRequest.request;
  IndexShard indexShard =
      indicesService
          .indexServiceSafe(shardRequest.request.index())
          .shardSafe(shardRequest.shardId);
  int successSize = 0;
  List<IngestItemFailure> failure = newLinkedList();
  int size = request.items().size();
  long[] versions = new long[size];
  Set<Tuple<String, String>> mappingsToUpdate = newHashSet();
  for (int i = 0; i < size; i++) {
    IngestItemRequest item = request.items().get(i);
    if (item.request() instanceof IndexRequest) {
      IndexRequest indexRequest = (IndexRequest) item.request();
      Engine.IndexingOperation op = null;
      try {
        // validate, if routing is required, that we got routing
        MappingMetaData mappingMd =
            clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
        if (mappingMd != null && mappingMd.routing().required()) {
          if (indexRequest.routing() == null) {
            throw new RoutingMissingException(
                indexRequest.index(), indexRequest.type(), indexRequest.id());
          }
        }
        SourceToParse sourceToParse =
            SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source())
                .type(indexRequest.type())
                .id(indexRequest.id())
                .routing(indexRequest.routing())
                .parent(indexRequest.parent())
                .timestamp(indexRequest.timestamp())
                .ttl(indexRequest.ttl());
        long version;
        if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
          Engine.Index index =
              indexShard
                  .prepareIndex(sourceToParse)
                  .version(indexRequest.version())
                  .versionType(indexRequest.versionType())
                  .origin(Engine.Operation.Origin.PRIMARY);
          op = index;
          indexShard.index(index);
          version = index.version();
        } else {
          Engine.Create create =
              indexShard
                  .prepareCreate(sourceToParse)
                  .version(indexRequest.version())
                  .versionType(indexRequest.versionType())
                  .origin(Engine.Operation.Origin.PRIMARY);
          op = create;
          indexShard.create(create);
          version = create.version();
        }
        versions[i] = indexRequest.version();
        // update the version on the request so it will happen on the replicas
        indexRequest.version(version);
        successSize++;
      } catch (Throwable e) {
        // rethrow the failure if we are going to retry on primary and let the parent failure
        // handle it
        if (retryPrimaryException(e)) {
          // restore updated versions...
          for (int j = 0; j < i; j++) {
            applyVersion(request.items().get(j), versions[j]);
          }
          logger.error(e.getMessage(), e);
          throw new ElasticsearchException(e.getMessage());
        }
        if (e instanceof ElasticsearchException
            && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
          logger.error(
              "[{}][{}] failed to execute bulk item (index) {}",
              e,
              shardRequest.request.index(),
              shardRequest.shardId,
              indexRequest);
        } else {
          logger.error(
              "[{}][{}] failed to execute bulk item (index) {}",
              e,
              shardRequest.request.index(),
              shardRequest.shardId,
              indexRequest);
        }
        failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
        // nullify the request so it won't execute on the replicas
        request.items().set(i, null);
      } finally {
        // update mapping on master if needed; we won't update changes to the same type, since
        // once it's changed, it won't have mappers added
        if (op != null && op.parsedDoc().mappingsModified()) {
          mappingsToUpdate.add(Tuple.tuple(indexRequest.index(), indexRequest.type()));
        }
      }
    } else if (item.request() instanceof DeleteRequest) {
      DeleteRequest deleteRequest = (DeleteRequest) item.request();
      try {
        Engine.Delete delete =
            indexShard
                .prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version())
                .versionType(deleteRequest.versionType())
                .origin(Engine.Operation.Origin.PRIMARY);
        indexShard.delete(delete);
        // update the request with the version so it will go to the replicas
        deleteRequest.version(delete.version());
        successSize++;
      } catch (Throwable e) {
        // rethrow the failure if we are going to retry on primary and let the parent failure
        // handle it
        if (retryPrimaryException(e)) {
          // restore updated versions...
          for (int j = 0; j < i; j++) {
            applyVersion(request.items().get(j), versions[j]);
          }
          logger.error(e.getMessage(), e);
          throw new ElasticsearchException(e.getMessage());
        }
        if (e instanceof ElasticsearchException
            && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
          logger.trace(
              "[{}][{}] failed to execute bulk item (delete) {}",
              e,
              shardRequest.request.index(),
              shardRequest.shardId,
              deleteRequest);
        } else {
          logger.debug(
              "[{}][{}] failed to execute bulk item (delete) {}",
              e,
              shardRequest.request.index(),
              shardRequest.shardId,
              deleteRequest);
        }
        failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
        // nullify the request so it won't execute on the replicas
        request.items().set(i, null);
      }
    }
  }
  if (!mappingsToUpdate.isEmpty()) {
    for (Tuple<String, String> mappingToUpdate : mappingsToUpdate) {
      logger.info("mapping update {} {}", mappingToUpdate.v1(), mappingToUpdate.v2());
      updateMappingOnMaster(mappingToUpdate.v1(), mappingToUpdate.v2());
    }
  }
  IngestShardResponse response =
      new IngestShardResponse(
          new ShardId(request.index(), request.shardId()), successSize, failure);
  return new PrimaryResponse<IngestShardResponse, IngestShardRequest>(
      shardRequest.request, response, null);
}
private void executeBulk(
    final BulkRequest bulkRequest,
    final long startTime,
    final ActionListener<BulkResponse> listener,
    final AtomicArray<BulkItemResponse> responses) {
  final ClusterState clusterState = clusterService.state();
  // TODO use timeout to wait here if its blocked...
  clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);
  final ConcreteIndices concreteIndices =
      new ConcreteIndices(clusterState, indexNameExpressionResolver);
  MetaData metaData = clusterState.metaData();
  for (int i = 0; i < bulkRequest.requests.size(); i++) {
    ActionRequest request = bulkRequest.requests.get(i);
    if (request instanceof DocumentRequest) {
      DocumentRequest req = (DocumentRequest) request;
      if (addFailureIfIndexIsUnavailable(
          req, bulkRequest, responses, i, concreteIndices, metaData)) {
        continue;
      }
      String concreteIndex = concreteIndices.resolveIfAbsent(req);
      if (request instanceof IndexRequest) {
        IndexRequest indexRequest = (IndexRequest) request;
        MappingMetaData mappingMd = null;
        if (metaData.hasIndex(concreteIndex)) {
          mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
        }
        try {
          indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
        } catch (ElasticsearchParseException | RoutingMissingException e) {
          BulkItemResponse.Failure failure =
              new BulkItemResponse.Failure(
                  concreteIndex, indexRequest.type(), indexRequest.id(), e);
          BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
          responses.set(i, bulkItemResponse);
          // make sure the request never gets processed again
          bulkRequest.requests.set(i, null);
        }
      } else {
        concreteIndices.resolveIfAbsent(req);
        req.routing(
            clusterState
                .metaData()
                .resolveIndexRouting(req.parent(), req.routing(), req.index()));
      }
    }
  }
  // first, go over all the requests and create a ShardId -> Operations mapping
  Map<ShardId, List<BulkItemRequest>> requestsByShard = new HashMap<>();
  for (int i = 0; i < bulkRequest.requests.size(); i++) {
    ActionRequest request = bulkRequest.requests.get(i);
    if (request instanceof IndexRequest) {
      IndexRequest indexRequest = (IndexRequest) request;
      String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index());
      ShardId shardId =
          clusterService
              .operationRouting()
              .indexShards(
                  clusterState,
                  concreteIndex,
                  indexRequest.type(),
                  indexRequest.id(),
                  indexRequest.routing())
              .shardId();
      List<BulkItemRequest> list = requestsByShard.get(shardId);
      if (list == null) {
        list = new ArrayList<>();
        requestsByShard.put(shardId, list);
      }
      list.add(new BulkItemRequest(i, request));
    } else if (request instanceof DeleteRequest) {
      DeleteRequest deleteRequest = (DeleteRequest) request;
      String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
      MappingMetaData mappingMd =
          clusterState.metaData().index(concreteIndex).mappingOrDefault(deleteRequest.type());
      if (mappingMd != null && mappingMd.routing().required() && deleteRequest.routing() == null) {
        // if routing is required, and no routing on the delete request, we need to broadcast
        // it....
        GroupShardsIterator groupShards =
            clusterService.operationRouting().broadcastDeleteShards(clusterState, concreteIndex);
        for (ShardIterator shardIt : groupShards) {
          List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
          if (list == null) {
            list = new ArrayList<>();
            requestsByShard.put(shardIt.shardId(), list);
          }
          list.add(new BulkItemRequest(i, deleteRequest));
        }
      } else {
        ShardId shardId =
            clusterService
                .operationRouting()
                .indexShards(
                    clusterState,
                    concreteIndex,
                    deleteRequest.type(),
                    deleteRequest.id(),
                    deleteRequest.routing())
                .shardId();
        List<BulkItemRequest> list = requestsByShard.get(shardId);
        if (list == null) {
          list = new ArrayList<>();
          requestsByShard.put(shardId, list);
        }
        list.add(new BulkItemRequest(i, request));
      }
    } else if (request instanceof UpdateRequest) {
      UpdateRequest updateRequest = (UpdateRequest) request;
      String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
      MappingMetaData mappingMd =
          clusterState.metaData().index(concreteIndex).mappingOrDefault(updateRequest.type());
      if (mappingMd != null && mappingMd.routing().required() && updateRequest.routing() == null) {
        BulkItemResponse.Failure failure =
            new BulkItemResponse.Failure(
                updateRequest.index(),
                updateRequest.type(),
                updateRequest.id(),
                new IllegalArgumentException("routing is required for this item"));
        responses.set(i, new BulkItemResponse(i, updateRequest.type(), failure));
        continue;
      }
      ShardId shardId =
          clusterService
              .operationRouting()
              .indexShards(
                  clusterState,
                  concreteIndex,
                  updateRequest.type(),
                  updateRequest.id(),
                  updateRequest.routing())
              .shardId();
      List<BulkItemRequest> list = requestsByShard.get(shardId);
      if (list == null) {
        list = new ArrayList<>();
        requestsByShard.put(shardId, list);
      }
      list.add(new BulkItemRequest(i, request));
    }
  }
  if (requestsByShard.isEmpty()) {
    listener.onResponse(
        new BulkResponse(
            responses.toArray(new BulkItemResponse[responses.length()]),
            buildTookInMillis(startTime)));
    return;
  }
  final AtomicInteger counter = new AtomicInteger(requestsByShard.size());
  for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
    final ShardId shardId = entry.getKey();
    final List<BulkItemRequest> requests = entry.getValue();
    BulkShardRequest bulkShardRequest =
        new BulkShardRequest(
            bulkRequest,
            shardId,
            bulkRequest.refresh(),
            requests.toArray(new BulkItemRequest[requests.size()]));
    bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
    bulkShardRequest.timeout(bulkRequest.timeout());
    shardBulkAction.execute(
        bulkShardRequest,
        new ActionListener<BulkShardResponse>() {
          @Override
          public void onResponse(BulkShardResponse bulkShardResponse) {
            for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) {
              // we may have no response if the item failed
              if (bulkItemResponse.getResponse() != null) {
                bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo());
              }
              responses.set(bulkItemResponse.getItemId(), bulkItemResponse);
            }
            if (counter.decrementAndGet() == 0) {
              finishHim();
            }
          }

          @Override
          public void onFailure(Throwable e) {
            // create failures for all relevant requests
            for (BulkItemRequest request : requests) {
              if (request.request() instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) request.request();
                responses.set(
                    request.id(),
                    new BulkItemResponse(
                        request.id(),
                        indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
                        new BulkItemResponse.Failure(
                            concreteIndices.getConcreteIndex(indexRequest.index()),
                            indexRequest.type(),
                            indexRequest.id(),
                            e)));
              } else if (request.request() instanceof DeleteRequest) {
                DeleteRequest deleteRequest = (DeleteRequest) request.request();
                responses.set(
                    request.id(),
                    new BulkItemResponse(
                        request.id(),
                        "delete",
                        new BulkItemResponse.Failure(
                            concreteIndices.getConcreteIndex(deleteRequest.index()),
                            deleteRequest.type(),
                            deleteRequest.id(),
                            e)));
              } else if (request.request() instanceof UpdateRequest) {
                UpdateRequest updateRequest = (UpdateRequest) request.request();
                responses.set(
                    request.id(),
                    new BulkItemResponse(
                        request.id(),
                        "update",
                        new BulkItemResponse.Failure(
                            concreteIndices.getConcreteIndex(updateRequest.index()),
                            updateRequest.type(),
                            updateRequest.id(),
                            e)));
              }
            }
            if (counter.decrementAndGet() == 0) {
              finishHim();
            }
          }

          private void finishHim() {
            listener.onResponse(
                new BulkResponse(
                    responses.toArray(new BulkItemResponse[responses.length()]),
                    buildTookInMillis(startTime)));
          }
        });
  }
}
@Test
public void testMappingMetaDataParsed() throws Exception {
  logger.info("--> starting 1 node");
  internalCluster().startNode(settingsBuilder().put("gateway.type", "local"));

  logger.info("--> creating test index, with meta routing");
  client()
      .admin()
      .indices()
      .prepareCreate("test")
      .addMapping(
          "type1",
          XContentFactory.jsonBuilder()
              .startObject()
              .startObject("type1")
              .startObject("_routing")
              .field("required", true)
              .endObject()
              .endObject()
              .endObject())
      .execute()
      .actionGet();

  logger.info("--> waiting for yellow status");
  ensureYellow();

  logger.info("--> verify meta _routing required exists");
  MappingMetaData mappingMd =
      client()
          .admin()
          .cluster()
          .prepareState()
          .execute()
          .actionGet()
          .getState()
          .metaData()
          .index("test")
          .mapping("type1");
  assertThat(mappingMd.routing().required(), equalTo(true));

  logger.info("--> restarting nodes...");
  internalCluster().fullRestart();

  logger.info("--> waiting for yellow status");
  ensureYellow();

  logger.info("--> verify meta _routing required exists");
  mappingMd =
      client()
          .admin()
          .cluster()
          .prepareState()
          .execute()
          .actionGet()
          .getState()
          .metaData()
          .index("test")
          .mapping("type1");
  assertThat(mappingMd.routing().required(), equalTo(true));
}