@Override
public final String esGet(String index, String type, String id) {
    try {
        final GetResponse gr = client.prepareGet(index, type, id).execute().actionGet();
        // a missing document is signalled with a sentinel string rather than null
        if (!gr.exists()) {
            return "Doesn't exist";
        }
        return gr.getSourceAsString();
    } catch (ElasticSearchException e) {
        log.debug("ElasticSearchException {}", e);
        return e.getMessage();
    }
}
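// A minimal usage sketch for esGet above, assuming it is called from the same class
// (the method name logIfPresent and the index/type/id values are hypothetical, not
// taken from the original code). Callers must handle the sentinel string themselves.
private void logIfPresent() {
    final String source = esGet("test", "type1", "1");
    if (!"Doesn't exist".equals(source)) {
        log.debug("document source: {}", source);
    }
}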
@Test
public void testUpdate() throws Exception {
    createIndex();
    ClusterHealthResponse clusterHealth =
        client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.GREEN));

    try {
        client.prepareUpdate("test", "type1", "1")
            .setScript("ctx._source.field++")
            .execute()
            .actionGet();
        assert false;
    } catch (DocumentMissingException e) {
        // all is well
    }

    client.prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();

    UpdateResponse updateResponse = client.prepareUpdate("test", "type1", "1")
        .setScript("ctx._source.field += 1")
        .execute()
        .actionGet();
    assertThat(updateResponse.version(), equalTo(2L));
    for (int i = 0; i < 5; i++) {
        GetResponse getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        assertThat(getResponse.sourceAsMap().get("field").toString(), equalTo("2"));
    }

    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setScript("ctx._source.field += count")
        .addScriptParam("count", 3)
        .execute()
        .actionGet();
    assertThat(updateResponse.version(), equalTo(3L));
    for (int i = 0; i < 5; i++) {
        GetResponse getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        assertThat(getResponse.sourceAsMap().get("field").toString(), equalTo("5"));
    }

    // check noop
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setScript("ctx.op = 'none'")
        .execute()
        .actionGet();
    assertThat(updateResponse.version(), equalTo(3L));
    for (int i = 0; i < 5; i++) {
        GetResponse getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        assertThat(getResponse.sourceAsMap().get("field").toString(), equalTo("5"));
    }

    // check delete
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setScript("ctx.op = 'delete'")
        .execute()
        .actionGet();
    assertThat(updateResponse.version(), equalTo(4L));
    for (int i = 0; i < 5; i++) {
        GetResponse getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        assertThat(getResponse.exists(), equalTo(false));
    }

    // check percolation
    client.prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
    logger.info("--> register a query");
    client.prepareIndex("_percolator", "test", "1")
        .setSource(jsonBuilder().startObject().field("query", termQuery("field", 2)).endObject())
        .setRefresh(true)
        .execute()
        .actionGet();
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setScript("ctx._source.field += 1")
        .setPercolate("*")
        .execute()
        .actionGet();
    assertThat(updateResponse.matches().size(), equalTo(1));

    // check TTL is kept after an update without TTL
    client.prepareIndex("test", "type1", "2")
        .setSource("field", 1)
        .setTTL(86400000L)
        .setRefresh(true)
        .execute()
        .actionGet();
    GetResponse getResponse =
        client.prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
    long ttl = ((Number) getResponse.field("_ttl").value()).longValue();
    assertThat(ttl, greaterThan(0L));
    client.prepareUpdate("test", "type1", "2")
        .setScript("ctx._source.field += 1")
        .execute()
        .actionGet();
    getResponse = client.prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
    ttl = ((Number) getResponse.field("_ttl").value()).longValue();
    assertThat(ttl, greaterThan(0L));

    // check TTL update
    client.prepareUpdate("test", "type1", "2")
        .setScript("ctx._ttl = 3600000")
        .execute()
        .actionGet();
    getResponse = client.prepareGet("test", "type1", "2").setFields("_ttl").execute().actionGet();
    ttl = ((Number) getResponse.field("_ttl").value()).longValue();
    assertThat(ttl, greaterThan(0L));
    assertThat(ttl, lessThanOrEqualTo(3600000L));

    // check timestamp update
    client.prepareIndex("test", "type1", "3")
        .setSource("field", 1)
        .setRefresh(true)
        .execute()
        .actionGet();
    client.prepareUpdate("test", "type1", "3")
        .setScript("ctx._timestamp = \"2009-11-15T14:12:12\"")
        .execute()
        .actionGet();
    getResponse =
        client.prepareGet("test", "type1", "3").setFields("_timestamp").execute().actionGet();
    long timestamp = ((Number) getResponse.field("_timestamp").value()).longValue();
    assertThat(timestamp, equalTo(1258294332000L));

    // check fields parameter
    client.prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setScript("ctx._source.field += 1")
        .setFields("_source", "field")
        .execute()
        .actionGet();
    assertThat(updateResponse.getResult(), notNullValue());
    assertThat(updateResponse.getResult().sourceRef(), notNullValue());
    assertThat(updateResponse.getResult().field("field").value(), notNullValue());

    // check updates without script
    // add new field
    client.prepareIndex("test", "type1", "1").setSource("field", 1).execute().actionGet();
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setDoc(XContentFactory.jsonBuilder().startObject().field("field2", 2).endObject())
        .execute()
        .actionGet();
    for (int i = 0; i < 5; i++) {
        getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        assertThat(getResponse.sourceAsMap().get("field").toString(), equalTo("1"));
        assertThat(getResponse.sourceAsMap().get("field2").toString(), equalTo("2"));
    }

    // change existing field
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 3).endObject())
        .execute()
        .actionGet();
    for (int i = 0; i < 5; i++) {
        getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        assertThat(getResponse.sourceAsMap().get("field").toString(), equalTo("3"));
        assertThat(getResponse.sourceAsMap().get("field2").toString(), equalTo("2"));
    }

    // recursive map
    Map<String, Object> testMap = new HashMap<String, Object>();
    Map<String, Object> testMap2 = new HashMap<String, Object>();
    Map<String, Object> testMap3 = new HashMap<String, Object>();
    testMap3.put("commonkey", testMap);
    testMap3.put("map3", 5);
    testMap2.put("map2", 6);
    testMap.put("commonkey", testMap2);
    testMap.put("map1", 8);
    client.prepareIndex("test", "type1", "1").setSource("map", testMap).execute().actionGet();
    updateResponse = client.prepareUpdate("test", "type1", "1")
        .setDoc(XContentFactory.jsonBuilder().startObject().field("map", testMap3).endObject())
        .execute()
        .actionGet();
    for (int i = 0; i < 5; i++) {
        getResponse = client.prepareGet("test", "type1", "1").execute().actionGet();
        Map map1 = (Map) getResponse.sourceAsMap().get("map");
        assertThat(map1.size(), equalTo(3));
        assertThat(map1.containsKey("map1"), equalTo(true));
        assertThat(map1.containsKey("map3"), equalTo(true));
        assertThat(map1.containsKey("commonkey"), equalTo(true));
        Map map2 = (Map) map1.get("commonkey");
        assertThat(map2.size(), equalTo(3));
        assertThat(map2.containsKey("map1"), equalTo(true));
        assertThat(map2.containsKey("map2"), equalTo(true));
        assertThat(map2.containsKey("commonkey"), equalTo(true));
    }
}
@Test
public void testSnapshotOperations() throws Exception {
    startNode("server1");

    // get the environment, so we can clear the work dir when needed
    Environment environment =
        ((InternalNode) node("server1")).injector().getInstance(Environment.class);

    logger.info("Running Cluster Health (waiting for node to start up properly)");
    ClusterHealthResponse clusterHealth = client("server1")
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForGreenStatus())
        .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.GREEN));

    // Translog tests
    logger.info("Creating index [{}]", "test");
    client("server1").admin().indices().prepareCreate("test").execute().actionGet();

    // create a mapping
    PutMappingResponse putMappingResponse = client("server1")
        .admin()
        .indices()
        .preparePutMapping("test")
        .setType("type1")
        .setSource(mappingSource())
        .execute()
        .actionGet();
    assertThat(putMappingResponse.acknowledged(), equalTo(true));

    // verify that the mapping is there
    ClusterStateResponse clusterState =
        client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.state().metaData().index("test").mapping("type1"), notNullValue());

    // create two documents and delete the first
    logger.info("Indexing #1");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test")))
        .actionGet();
    logger.info("Indexing #2");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("2").source(source("2", "test")))
        .actionGet();

    // perform a snapshot of the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Deleting #1");
    client("server1").delete(deleteRequest("test").type("type1").id("1")).actionGet();

    // perform another snapshot of the index
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    // do it again, it should be a no op
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Starting the server, should recover from the gateway (only translog should be populated)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1")
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
        .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    // verify that the mapping is there
    clusterState = client("server1").admin().cluster().state(clusterStateRequest()).actionGet();
    assertThat(clusterState.state().metaData().index("test").mapping("type1"), notNullValue());

    logger.info("Getting #1, should not exist");
    GetResponse getResponse =
        client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.exists(), equalTo(false));
    logger.info("Getting #2");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("2", "test")));

    // Now flush and add some data (so we have index recovery as well)
    logger.info("Flushing, so we have actual content in the index files (#2 should be in the index)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Indexing #3, so we have something in the translog as well");
    client("server1")
        .index(Requests.indexRequest("test").type("type1").id("3").source(source("3", "test")))
        .actionGet();

    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Starting the server, should recover from the gateway (both index and translog) and reuse the work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1")
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
        .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exist");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.exists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("3", "test")));

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Clearing the cluster data dir, so there will be a full recovery from the gateway");
    FileSystemUtils.deleteRecursively(environment.dataWithClusterFiles());
    logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing the work dir");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1")
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
        .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exist");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.exists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (from the translog)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("3", "test")));

    logger.info("Flushing, so we have actual content in the index files (#3 should be in the index now as well)");
    client("server1").admin().indices().flush(flushRequest("test")).actionGet();
    logger.info("Gateway Snapshot");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();
    logger.info("Gateway Snapshot (should be a no op)");
    client("server1").admin().indices().gatewaySnapshot(gatewaySnapshotRequest("test")).actionGet();

    logger.info("Closing the server");
    closeNode("server1");
    logger.info("Starting the server, should recover from the gateway (just from the index, nothing in the translog)");
    startNode("server1");

    logger.info("Running Cluster Health (wait for the shards to start up)");
    clusterHealth = client("server1")
        .admin()
        .cluster()
        .health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(1))
        .actionGet();
    logger.info("Done Cluster Health, status " + clusterHealth.status());
    assertThat(clusterHealth.timedOut(), equalTo(false));
    assertThat(clusterHealth.status(), equalTo(ClusterHealthStatus.YELLOW));

    logger.info("Getting #1, should not exist");
    getResponse = client("server1").get(getRequest("test").type("type1").id("1")).actionGet();
    assertThat(getResponse.exists(), equalTo(false));
    logger.info("Getting #2 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("2")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("2", "test")));
    logger.info("Getting #3 (not from the translog, but from the index)");
    getResponse = client("server1").get(getRequest("test").type("type1").id("3")).actionGet();
    assertThat(getResponse.sourceAsString(), equalTo(source("3", "test")));

    logger.info("Deleting the index");
    client("server1").admin().indices().delete(deleteIndexRequest("test")).actionGet();
}
public static void main(String[] args) throws Exception {
    Settings settings = settingsBuilder().put("gateway.type", "none").build();

    Node node1 = nodeBuilder().settings(settings).node();
    Node node2 = nodeBuilder().settings(settings).node();
    final Node client = nodeBuilder().settings(settings).client(true).node();

    final int NUMBER_OF_DOCS = 10000;
    final int NUMBER_OF_THREADS = 10;
    final long NUMBER_OF_ITERATIONS = SizeValue.parseSizeValue("10k").singles();
    final long DELETE_EVERY = 10;

    final CountDownLatch latch = new CountDownLatch(NUMBER_OF_THREADS);
    Thread[] threads = new Thread[NUMBER_OF_THREADS];
    for (int i = 0; i < threads.length; i++) {
        threads[i] = new Thread() {
            @Override
            public void run() {
                try {
                    for (long i = 0; i < NUMBER_OF_ITERATIONS; i++) {
                        if ((i % DELETE_EVERY) == 0) {
                            client
                                .client()
                                .prepareDelete(
                                    "test",
                                    "type1",
                                    Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
                                .execute()
                                .actionGet();
                        } else {
                            client
                                .client()
                                .prepareIndex(
                                    "test",
                                    "type1",
                                    Integer.toString(ThreadLocalRandom.current().nextInt(NUMBER_OF_DOCS)))
                                .setSource("field1", "value1")
                                .execute()
                                .actionGet();
                        }
                    }
                } finally {
                    latch.countDown();
                }
            }
        };
    }
    for (Thread thread : threads) {
        thread.start();
    }
    latch.await();

    System.out.println("done indexing, verifying docs");
    client.client().admin().indices().prepareRefresh().execute().actionGet();
    for (int i = 0; i < NUMBER_OF_DOCS; i++) {
        String id = Integer.toString(i);
        for (int j = 0; j < 5; j++) {
            SearchResponse response = client
                .client()
                .prepareSearch()
                .setQuery(QueryBuilders.termQuery("_id", id))
                .execute()
                .actionGet();
            if (response.getHits().totalHits() > 1) {
                System.err.println("[" + i + "] FAIL, HITS [" + response.getHits().totalHits() + "]");
            }
        }
        GetResponse getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
        if (getResponse.exists()) {
            long version = getResponse.version();
            for (int j = 0; j < 5; j++) {
                getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
                if (!getResponse.exists()) {
                    System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
                    break;
                }
                if (version != getResponse.version()) {
                    System.err.println("[" + i + "] FAIL, DIFFERENT VERSIONS: [" + version + "], ["
                        + getResponse.version() + "]");
                    break;
                }
            }
        } else {
            for (int j = 0; j < 5; j++) {
                getResponse = client.client().prepareGet("test", "type1", id).execute().actionGet();
                if (getResponse.exists()) {
                    System.err.println("[" + i + "] FAIL, EXISTED, and NOT_EXISTED");
                    break;
                }
            }
        }
    }
    System.out.println("done.");

    client.close();
    node1.close();
    node2.close();
}
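// A sketch of the same concurrent write loop built on an ExecutorService instead of
// raw threads and a CountDownLatch. It uses only standard java.util.concurrent APIs
// (Executors, ExecutorService, TimeUnit) plus the client calls already shown in main
// above; the method name runWriters and its parameters are hypothetical.
static void runWriters(final Node client, int threads, final long iterations,
                       final long deleteEvery, final int numberOfDocs) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    for (int t = 0; t < threads; t++) {
        pool.submit(new Runnable() {
            @Override
            public void run() {
                for (long i = 0; i < iterations; i++) {
                    String id = Integer.toString(ThreadLocalRandom.current().nextInt(numberOfDocs));
                    if ((i % deleteEvery) == 0) {
                        // occasionally delete a random document, as in main above
                        client.client().prepareDelete("test", "type1", id).execute().actionGet();
                    } else {
                        client.client().prepareIndex("test", "type1", id)
                            .setSource("field1", "value1")
                            .execute()
                            .actionGet();
                    }
                }
            }
        });
    }
    // shut down and wait for all writer tasks to finish
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.HOURS);
}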