public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
    if (DEFAULT_MAPPING.equals(type)) {
        // verify we can parse it
        // NOTE: never apply the default here
        DocumentMapper mapper = documentParser.parse(type, mappingSource);
        // still add it as a document mapper so we have it registered and, for example, persisted back into
        // the cluster meta data if needed, or checked for existence
        synchronized (this) {
            mappers = newMapBuilder(mappers).put(type, mapper).map();
        }
        try {
            defaultMappingSource = mappingSource.string();
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("failed to un-compress", e);
        }
        return mapper;
    } else {
        synchronized (this) {
            final boolean applyDefault =
                    // the default was already applied if we are recovering
                    reason != MergeReason.MAPPING_RECOVERY
                    // only apply the default mapping if we don't have the type yet
                    && mappers.containsKey(type) == false;
            DocumentMapper mergeWith = parse(type, mappingSource, applyDefault);
            return merge(mergeWith, reason, updateAllTypes);
        }
    }
}
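A minimal caller sketch for the two MergeReason paths (the mapperService field, type name, and mapping JSON are illustrative assumptions, not taken from the original; CompressedXContent(String) can throw IOException, so this assumes an enclosing method that declares it):

// Hypothetical caller; names and mapping content are assumptions for illustration.
String mappingJson = "{\"my_type\":{\"properties\":{\"field\":{\"type\":\"string\"}}}}";
CompressedXContent source = new CompressedXContent(mappingJson);

// Live update: the default mapping is applied if "my_type" does not exist yet.
mapperService.merge("my_type", source, MapperService.MergeReason.MAPPING_UPDATE, false);

// Recovery from cluster state: the default was already applied when the mapping
// was first created, so the merge skips it.
mapperService.merge("my_type", source, MapperService.MergeReason.MAPPING_RECOVERY, false);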
private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate,
                                                         XContentBuilder expectedMapping) throws IOException {
    IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping);
    // simulate like in MetaDataMappingService#putMapping
    indexService.mapperService().merge("type", new CompressedXContent(mappingUpdate.bytes()),
        MapperService.MergeReason.MAPPING_UPDATE, false);
    // make sure mappings applied
    CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
    assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string()));
}
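A sketch of how this helper might be invoked. The test name, the singleFieldMapping helper, the field name, and the expectation that an identical merge round-trips byte-for-byte are all assumptions for illustration; each call builds a fresh XContentBuilder because builders are single-use:

// Hypothetical test: merging a mapping identical to the current one should be a
// no-op, so the expected mapping equals the original.
public void testIdenticalMergeIsNoOp() throws Exception {
    testNoConflictWhileMergingAndMappingChanged(singleFieldMapping(), singleFieldMapping(), singleFieldMapping());
}

// Hypothetical helper, not part of the original code.
private XContentBuilder singleFieldMapping() throws IOException {
    return XContentFactory.jsonBuilder().startObject()
        .startObject("type")
            .startObject("properties")
                .startObject("foo").field("type", "string").endObject()
            .endObject()
        .endObject()
    .endObject();
}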
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
    if (DEFAULT_MAPPING.equals(type)) {
        // verify we can parse it
        DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
        // still add it as a document mapper so we have it registered and, for example, persisted back into
        // the cluster meta data if needed, or checked for existence
        try (ReleasableLock lock = mappingWriteLock.acquire()) {
            mappers = newMapBuilder(mappers).put(type, mapper).map();
        }
        try {
            defaultMappingSource = mappingSource.string();
        } catch (IOException e) {
            throw new ElasticsearchGenerationException("failed to un-compress", e);
        }
        return mapper;
    } else {
        return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
    }
}
private boolean processMapping(String index, MapperService mapperService, String mappingType,
                               CompressedXContent mappingSource) throws Throwable {
    if (!seenMappings.containsKey(new Tuple<>(index, mappingType))) {
        seenMappings.put(new Tuple<>(index, mappingType), true);
    }

    // refresh mapping can happen for 2 reasons. The first is less urgent, and happens when the mapping on this
    // node is ahead of what there is in the cluster state (yet an update-mapping has been sent to it already,
    // it just hasn't been processed yet and published). Eventually, the mappings will converge, and the refresh
    // mapping sent is more of a safe keeping (assuming the update mapping failed to reach the master, ...)
    // the second case is where the parsing/merging of the mapping from the metadata doesn't result in the same
    // mapping, in this case, we send to the master to refresh its own version of the mappings (to conform with the
    // merge version of it, which it does when refreshing the mappings), and warn log it.
    boolean requiresRefresh = false;
    try {
        if (!mapperService.hasMapping(mappingType)) {
            if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
                logger.debug("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
            } else if (logger.isTraceEnabled()) {
                logger.trace("[{}] adding mapping [{}], source [{}]", index, mappingType, mappingSource.string());
            } else {
                logger.debug("[{}] adding mapping [{}] (source suppressed due to length, use TRACE level if needed)",
                    index, mappingType);
            }
            // we don't apply default, since it has been applied when the mappings were parsed initially
            mapperService.merge(mappingType, mappingSource, false, true);
            if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
                logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}",
                    index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
                requiresRefresh = true;
            }
        } else {
            DocumentMapper existingMapper = mapperService.documentMapper(mappingType);
            if (!mappingSource.equals(existingMapper.mappingSource())) {
                // mapping changed, update it
                if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
                    logger.debug("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
                } else if (logger.isTraceEnabled()) {
                    logger.trace("[{}] updating mapping [{}], source [{}]", index, mappingType, mappingSource.string());
                } else {
                    logger.debug("[{}] updating mapping [{}] (source suppressed due to length, use TRACE level if needed)",
                        index, mappingType);
                }
                // we don't apply default, since it has been applied when the mappings were parsed initially
                mapperService.merge(mappingType, mappingSource, false, true);
                if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
                    requiresRefresh = true;
                    logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}",
                        index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
                }
            }
        }
    } catch (Throwable e) {
        logger.warn("[{}] failed to add mapping [{}], source [{}]", e, index, mappingType, mappingSource);
        throw e;
    }
    return requiresRefresh;
}
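The add and update branches above duplicate the same log-then-merge-then-compare sequence. One possible extraction is sketched below; this helper is an illustration of the shared logic, not part of the original code, and the name mergeAndCheckSource and the action parameter are assumptions:

// Hypothetical helper factoring out the duplicated branch bodies; "action" would be
// "adding" or "updating". Returns true when the re-serialized mapping differs from
// the source, i.e. when the master should be asked to refresh its mappings.
private boolean mergeAndCheckSource(String index, MapperService mapperService, String mappingType,
                                    CompressedXContent mappingSource, String action) throws IOException {
    if (logger.isDebugEnabled() && mappingSource.compressed().length < 512) {
        logger.debug("[{}] {} mapping [{}], source [{}]", index, action, mappingType, mappingSource.string());
    } else if (logger.isTraceEnabled()) {
        logger.trace("[{}] {} mapping [{}], source [{}]", index, action, mappingType, mappingSource.string());
    } else {
        logger.debug("[{}] {} mapping [{}] (source suppressed due to length, use TRACE level if needed)",
            index, action, mappingType);
    }
    // we don't apply default, since it has been applied when the mappings were parsed initially
    mapperService.merge(mappingType, mappingSource, false, true);
    if (!mapperService.documentMapper(mappingType).mappingSource().equals(mappingSource)) {
        logger.debug("[{}] parsed mapping [{}], and got different sources\noriginal:\n{}\nparsed:\n{}",
            index, mappingType, mappingSource, mapperService.documentMapper(mappingType).mappingSource());
        return true;
    }
    return false;
}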