@Test(expected = MergeMappingException.class)
public void updateMappingWithConflicts() throws Exception {
    client().admin().indices().prepareCreate("test")
            .setSettings(ImmutableSettings.settingsBuilder()
                    .put("index.number_of_shards", 2)
                    .put("index.number_of_replicas", 0))
            .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
            .execute().actionGet();
    client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .execute().actionGet();

    PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test")
            .setType("type")
            .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
            .execute().actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
}
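// A minimal sketch (not part of the test suite above) of handling the same conflict without
// JUnit's "expected" attribute: the string -> integer change on "body" is assumed to be
// rejected with a MergeMappingException, which is caught explicitly.
try {
    client().admin().indices().preparePutMapping("test")
            .setType("type")
            .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"integer\"}}}}")
            .execute().actionGet();
    fail("expected the conflicting field type change to be rejected");
} catch (MergeMappingException e) {
    // conflicting merge rejected; the existing "string" mapping is kept
}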
/* Second regression test for https://github.com/elasticsearch/elasticsearch/issues/3381 */
@Test
public void updateMappingNoChanges() throws Exception {
    client().admin().indices().prepareCreate("test")
            .setSettings(settingsBuilder()
                    .put("index.number_of_shards", 2)
                    .put("index.number_of_replicas", 0))
            .addMapping("type", "{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
            .execute().actionGet();
    client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .execute().actionGet();

    PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test")
            .setType("type")
            .setSource("{\"type\":{\"properties\":{\"body\":{\"type\":\"string\"}}}}")
            .execute().actionGet();

    // no changes to the mapping, so the request is acknowledged as a no-op
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));
}
private static void createIndex() {
    CreateIndexRequest cir = new CreateIndexRequest(indexName2);
    CreateIndexResponse response = client.admin().indices().create(cir).actionGet();
    System.out.println("create index: " + response.isAcknowledged());

    PutMappingRequest pmr = new PutMappingRequest(indexName2).type(indexType3).source(mapping);
    PutMappingResponse response2 = client.admin().indices().putMapping(pmr).actionGet();
    System.out.println("put mapping: " + response2.isAcknowledged());
}
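// A sketch of the same helper written with the fluent prepare* builders used elsewhere in
// these examples. The method name createIndexWithBuilders is hypothetical; it assumes the
// same client, indexName2, indexType3, and mapping fields, and that "mapping" is a JSON
// string or builder accepted by setSource.
private static void createIndexWithBuilders() {
    CreateIndexResponse createResponse =
            client.admin().indices().prepareCreate(indexName2).execute().actionGet();
    System.out.println("create index: " + createResponse.isAcknowledged());

    PutMappingResponse mappingResponse = client.admin().indices().preparePutMapping(indexName2)
            .setType(indexType3)
            .setSource(mapping)
            .execute().actionGet();
    System.out.println("put mapping: " + mappingResponse.isAcknowledged());
}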
@Test
public void testPutMappingNoAcknowledgement() {
    createIndex("test");
    ensureGreen();

    PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test")
            .setType("test")
            .setSource("field", "type=string,index=not_analyzed")
            .setTimeout("0s")
            .get();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(false));
}
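// The "0s" timeout above makes the request return before the cluster-wide acknowledgement
// arrives, so isAcknowledged() is false even though the mapping change is typically still
// applied. A sketch of the same call with a realistic timeout, where the acknowledgement can
// be asserted ("field2" is a hypothetical additional field, not part of the test above):
PutMappingResponse ackedResponse = client().admin().indices().preparePutMapping("test")
        .setType("test")
        .setSource("field2", "type=string,index=not_analyzed")
        .setTimeout("10s")
        .get();
assertThat(ackedResponse.isAcknowledged(), equalTo(true));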
protected void preparePutMapping() throws Exception {
    XContentBuilder xContentBuilder = jsonBuilder()
            .startObject()
                .startObject(super.getIndexType())
                    .startObject("properties")
                        .startObject("massimale")
                            .startObject("properties")
                                .startObject("id").field("type", "string").endObject()
                                .startObject("areaGeografica").field("type", "string").endObject()
                                .startObject("descrizione").field("type", "string").endObject()
                                .startObject("livello").field("type", "string").endObject()
                                .startObject("tipo").field("type", "string").endObject()
                                .startObject("value").field("type", "double").endObject()
                                .startObject("valueMezzaGiornata").field("type", "double").endObject()
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();

    logger.debug("#####################TRYING TO PUT MAPPING with SOURCE : \n{}\n", xContentBuilder.string());

    PutMappingResponse putMappingResponse = super.putMapping(xContentBuilder);
    logger.debug("##########################"
            + (putMappingResponse.isAcknowledged()
                    ? "PUT_MAPPING_STATUS IS OK.\n"
                    : "PUT_MAPPING NOT CREATED.\n"));
}
@Test
public void updateMappingWithoutTypeMultiObjects() throws Exception {
    client().admin().indices().prepareCreate("test")
            .setSettings(ImmutableSettings.settingsBuilder()
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 0))
            .execute().actionGet();
    client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .execute().actionGet();

    PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test")
            .setType("doc")
            .setSource("{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}")
            .execute().actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    GetMappingsResponse getMappingsResponse =
            client().admin().indices().prepareGetMappings("test").execute().actionGet();
    assertThat(
            getMappingsResponse.mappings().get("test").get("doc").source().toString(),
            equalTo("{\"doc\":{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}}"));
}
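// The GetMappings assertion above shows the typeless source being wrapped in the type name on
// the server side. A sketch of submitting the equivalent type-wrapped source explicitly, which
// should be accepted as a no-op against the mapping already in place:
PutMappingResponse wrappedResponse = client().admin().indices().preparePutMapping("test")
        .setType("doc")
        .setSource("{\"doc\":{\"_source\":{\"enabled\":false},\"properties\":{\"date\":{\"type\":\"integer\"}}}}")
        .execute().actionGet();
assertThat(wrappedResponse.isAcknowledged(), equalTo(true));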
@Test
public void percolateWithValidParameters() throws IOException {
    String index = "cvbank";
    String type = "candidate";
    createIndex(index);
    ensureSearchable(index);

    PutMappingResponse putMappingResponse = client().admin().indices()
            .putMapping(new PutMappingRequest(index)
                    .type(type)
                    .source("{ \"properties\" : { \"language\" : {\"type\" : \"string\", \"store\" : \"yes\"} } }"))
            .actionGet();
    assertTrue(putMappingResponse.isAcknowledged());

    String query = "{\n"
            + "    \"query\" : {\n"
            + "        \"term\" : {\n"
            + "            \"language\" : \"java\"\n"
            + "        }\n"
            + "    }\n"
            + "}";

    // register the percolator query on our index
    JestResult result = client.execute(
            new Index.Builder(query).index(index).type(".percolator").id("1").build());
    assertTrue(result.getErrorMessage(), result.isSucceeded());

    // try to match a document against the registered percolator
    Percolate percolate =
            new Percolate.Builder(index, type, "{\"doc\" : {\"language\":\"java\"}}").build();
    result = client.execute(percolate);
    assertTrue(result.getErrorMessage(), result.isSucceeded());
    assertEquals(1, result.getJsonObject().getAsJsonPrimitive("total").getAsInt());
}
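// A small follow-on sketch with the same Jest client: a document that does not satisfy the
// registered "language:java" query is expected to produce no percolator matches.
Percolate noMatch =
        new Percolate.Builder(index, type, "{\"doc\" : {\"language\":\"ruby\"}}").build();
JestResult noMatchResult = client.execute(noMatch);
assertTrue(noMatchResult.getErrorMessage(), noMatchResult.isSucceeded());
assertEquals(0, noMatchResult.getJsonObject().getAsJsonPrimitive("total").getAsInt());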
@SuppressWarnings("unchecked") @Test public void updateDefaultMappingSettings() throws Exception { logger.info("Creating index with _default_ mappings"); client() .admin() .indices() .prepareCreate("test") .addMapping( MapperService.DEFAULT_MAPPING, JsonXContent.contentBuilder() .startObject() .startObject(MapperService.DEFAULT_MAPPING) .field("date_detection", false) .endObject() .endObject()) .get(); GetMappingsResponse getResponse = client() .admin() .indices() .prepareGetMappings("test") .addTypes(MapperService.DEFAULT_MAPPING) .get(); Map<String, Object> defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); assertThat(defaultMapping, hasKey("date_detection")); logger.info("Emptying _default_ mappings"); // now remove it PutMappingResponse putResponse = client() .admin() .indices() .preparePutMapping("test") .setType(MapperService.DEFAULT_MAPPING) .setSource( JsonXContent.contentBuilder() .startObject() .startObject(MapperService.DEFAULT_MAPPING) .endObject() .endObject()) .get(); assertThat(putResponse.isAcknowledged(), equalTo(true)); logger.info("Done Emptying _default_ mappings"); getResponse = client() .admin() .indices() .prepareGetMappings("test") .addTypes(MapperService.DEFAULT_MAPPING) .get(); defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); assertThat(defaultMapping, not(hasKey("date_detection"))); // now test you can change stuff that are normally unchangeable logger.info("Creating _default_ mappings with an analyzed field"); putResponse = client() .admin() .indices() .preparePutMapping("test") .setType(MapperService.DEFAULT_MAPPING) .setSource( JsonXContent.contentBuilder() .startObject() .startObject(MapperService.DEFAULT_MAPPING) .startObject("properties") .startObject("f") .field("type", "string") .field("index", "analyzed") .endObject() .endObject() .endObject() .endObject()) .get(); assertThat(putResponse.isAcknowledged(), equalTo(true)); logger.info("Changing _default_ mappings field from analyzed to non-analyzed"); putResponse = client() .admin() .indices() .preparePutMapping("test") .setType(MapperService.DEFAULT_MAPPING) .setSource( JsonXContent.contentBuilder() .startObject() .startObject(MapperService.DEFAULT_MAPPING) .startObject("properties") .startObject("f") .field("type", "string") .field("index", "not_analyzed") .endObject() .endObject() .endObject() .endObject()) .get(); assertThat(putResponse.isAcknowledged(), equalTo(true)); logger.info("Done changing _default_ mappings field from analyzed to non-analyzed"); getResponse = client() .admin() .indices() .prepareGetMappings("test") .addTypes(MapperService.DEFAULT_MAPPING) .get(); defaultMapping = getResponse.getMappings().get("test").get(MapperService.DEFAULT_MAPPING).sourceAsMap(); Map<String, Object> fieldSettings = (Map<String, Object>) ((Map) defaultMapping.get("properties")).get("f"); assertThat(fieldSettings, hasEntry("index", (Object) "not_analyzed")); // but we still validate the _default_ type logger.info("Confirming _default_ mappings validation"); assertThrows( client() .admin() .indices() .preparePutMapping("test") .setType(MapperService.DEFAULT_MAPPING) .setSource( JsonXContent.contentBuilder() .startObject() .startObject(MapperService.DEFAULT_MAPPING) .startObject("properties") .startObject("f") .field("type", "DOESNT_EXIST") .endObject() .endObject() .endObject() .endObject()), MapperParsingException.class); }
@Test
@Slow
public void testSnapshotOperations() throws Exception {
    startNode("server1", getClassDefaultSettings());

    // get the environment, so we can clear the work dir when needed
    Environment environment =
            ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to start up properly)");
    ClusterHealthResponse clusterHealth = client("server1").admin().cluster()
            .health(clusterHealthRequest().waitForGreenStatus()).actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests

    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse = client("server1").admin().indices()
            .preparePutMapping("test").setType("type1").setSource(mappingSource())
            .execute().actionGet();
    assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

    // verify that the mapping is there
    ClusterStateResponse clusterState =
            client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    // create two documents and delete the first
    logger.info("Indexing #1");
    client("server1")
            .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
            .actionGet();
    logger.info("Indexing #2");
    client("server1")
            .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
            .actionGet();

    // perform a snapshot of the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // perform a snapshot of the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1").admin().cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that the mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.getState().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exist");
    GetResponse getResponse =
            client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info("Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
            .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
            .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Starting the server, should recover from the gateway (both index and translog) and reuse work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1").admin().cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exist");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1").admin().cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exist");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1").admin().cluster()
            .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1)).actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exist");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.isExists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.getSourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
}
@SuppressWarnings("unchecked") @Test public void updateIncludeExclude() throws Exception { assertAcked( prepareCreate("test") .addMapping( "type", jsonBuilder() .startObject() .startObject("type") .startObject("properties") .startObject("normal") .field("type", "long") .endObject() .startObject("exclude") .field("type", "long") .endObject() .startObject("include") .field("type", "long") .endObject() .endObject() .endObject() .endObject())); ensureGreen(); // make sure that replicas are initialized so the refresh command will work them // too logger.info("Index doc"); index( "test", "type", "1", JsonXContent.contentBuilder() .startObject() .field("normal", 1) .field("exclude", 1) .field("include", 1) .endObject()); refresh(); // commit it for later testing. logger.info("Adding exclude settings"); PutMappingResponse putResponse = client() .admin() .indices() .preparePutMapping("test") .setType("type") .setSource( JsonXContent.contentBuilder() .startObject() .startObject("type") .startObject("_source") .startArray("excludes") .value("exclude") .endArray() .endObject() .endObject()) .get(); assertTrue(putResponse.isAcknowledged()); // changed mapping doesn't affect indexed documents (checking backward compatibility) GetResponse getResponse = client().prepareGet("test", "type", "1").setRealtime(false).get(); assertThat(getResponse.getSource(), hasKey("normal")); assertThat(getResponse.getSource(), hasKey("exclude")); assertThat(getResponse.getSource(), hasKey("include")); logger.info("Index doc again"); index( "test", "type", "1", JsonXContent.contentBuilder() .startObject() .field("normal", 2) .field("exclude", 1) .field("include", 2) .endObject()); // but do affect newly indexed docs getResponse = get("test", "type", "1"); assertThat(getResponse.getSource(), hasKey("normal")); assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); logger.info("Changing mapping to includes"); putResponse = client() .admin() .indices() .preparePutMapping("test") .setType("type") .setSource( JsonXContent.contentBuilder() .startObject() .startObject("type") .startObject("_source") .startArray("excludes") .endArray() .startArray("includes") .value("include") .endArray() .endObject() .endObject()) .get(); assertTrue(putResponse.isAcknowledged()); GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); MappingMetaData typeMapping = getMappingsResponse.getMappings().get("test").get("type"); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes")); ArrayList<String> includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes"); assertThat(includes, contains("include")); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); assertThat( (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"), emptyIterable()); logger.info("Indexing doc yet again"); index( "test", "type", "1", JsonXContent.contentBuilder() .startObject() .field("normal", 3) .field("exclude", 3) .field("include", 3) .endObject()); getResponse = get("test", "type", "1"); assertThat(getResponse.getSource(), not(hasKey("normal"))); assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); logger.info("Adding excludes, but keep includes"); putResponse = client() .admin() .indices() .preparePutMapping("test") 
.setType("type") .setSource( JsonXContent.contentBuilder() .startObject() .startObject("type") .startObject("_source") .startArray("excludes") .value("*.excludes") .endArray() .endObject() .endObject()) .get(); assertTrue(putResponse.isAcknowledged()); getMappingsResponse = client().admin().indices().prepareGetMappings("test").get(); typeMapping = getMappingsResponse.getMappings().get("test").get("type"); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes")); includes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes"); assertThat(includes, contains("include")); assertThat( (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); ArrayList<String> excludes = (ArrayList<String>) ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"); assertThat(excludes, contains("*.excludes")); }