public void testIndexWithShadowReplicasCleansUp() throws Exception {
  // Creates a shadow-replica index backed by a shared data path, then verifies
  // that deleting the index also removes its files from that shared path.
  Path dataPath = createTempDir();
  Settings nodeSettings = nodeSettings(dataPath);
  final int nodeCount = randomIntBetween(2, 5);
  logger.info("--> starting {} nodes", nodeCount);
  final List<String> nodes = internalCluster().startNodesAsync(nodeCount, nodeSettings).get();
  final String IDX = "test";
  // Randomize shard/replica counts so cleanup is exercised across layouts.
  final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(nodeCount);
  final int numPrimaries = numPrimariesAndReplicas.v1();
  final int numReplicas = numPrimariesAndReplicas.v2();
  logger.info(
      "--> creating index {} with {} primary shards and {} replicas",
      IDX,
      numPrimaries,
      numReplicas);
  Settings idxSettings =
      Settings.builder()
          .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries)
          .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas)
          .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
          .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
          .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
          .build();
  prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
  ensureGreen(IDX);
  client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
  client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
  flushAndRefresh(IDX);
  GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
  GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  logger.info("--> performing query");
  SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
  assertHitCount(resp, 2);
  logger.info("--> deleting index " + IDX);
  assertAcked(client().admin().indices().prepareDelete(IDX));
  assertAllIndicesRemovedAndDeletionCompleted(
      internalCluster().getInstances(IndicesService.class));
  // Deleting the index must clean up the shared filesystem path as well.
  assertPathHasBeenCleared(dataPath);
  // TODO: uncomment the assertion below when
  // https://github.com/elastic/elasticsearch/issues/17695 is resolved.
  // assertIndicesDirsDeleted(nodes);
}
@Override
public HashMap<String, String> loadKey(String username) {
  // Loads the stored credential map for the given username from the river's
  // configuration index ("_pwd" document). Returns null when no credentials
  // are stored (preserved contract: null, not an empty map).
  logger.info("loading password for username {}", username);
  HashMap<String, String> ret = new HashMap<>();
  String riverIndexName = getRiverIndexName();
  refreshSearchIndex(riverIndexName);
  GetResponse resp =
      client.prepareGet(riverIndexName, riverName().name(), "_pwd").execute().actionGet();
  if (resp.isExists()) {
    // NOTE(review): this dumps the whole credential document at DEBUG; confirm
    // debug logs are acceptable for secrets in this deployment.
    if (logger.isDebugEnabled()) {
      logger.debug("Password document: {}", resp.getSourceAsString());
    }
    Map<String, Object> newset = resp.getSource();
    for (Map.Entry<String, Object> entry : newset.entrySet()) {
      // SECURITY: do not log the credential value itself, only the key name,
      // and only at DEBUG level (previously both were logged at INFO).
      logger.debug("Added key {}", entry.getKey());
      ret.put(entry.getKey(), XContentMapValues.nodeStringValue(entry.getValue(), null));
    }
  }
  if (ret.isEmpty()) {
    return null;
  }
  return ret;
}
@Override
public void run() {
  // Benchmark worker: round-trips documents (get + re-index) in batches of 8,
  // timing an explicit refresh after each batch, until `run` is cleared.
  // Publishes the average refresh latency via `avgRefreshTime` and signals
  // completion via `stopped`.
  long totalRefreshTime = 0;
  int numExecutedRefreshed = 0;
  outer:
  while (run) {
    long docIdLimit = COUNT;
    for (long docId = 1; run && docId < docIdLimit; ) {
      try {
        for (int j = 0; j < 8; j++) {
          GetResponse getResponse =
              client.prepareGet(indexName, "type1", String.valueOf(++docId)).get();
          client
              .prepareIndex(indexName, "type1", getResponse.getId())
              .setSource(getResponse.getSource())
              .get();
        }
        long startTime = System.currentTimeMillis();
        client.admin().indices().prepareRefresh(indexName).execute().actionGet();
        totalRefreshTime += System.currentTimeMillis() - startTime;
        numExecutedRefreshed++;
        Thread.sleep(500);
      } catch (InterruptedException e) {
        // Restore the interrupt flag (previously swallowed) and stop cleanly
        // so `avgRefreshTime` and `stopped` are still published below.
        Thread.currentThread().interrupt();
        break outer;
      } catch (Throwable e) {
        // Best-effort benchmark loop: log and keep going.
        e.printStackTrace();
      }
    }
  }
  // Guard against ArithmeticException when no refresh completed before shutdown
  // (the original divided unconditionally).
  avgRefreshTime = numExecutedRefreshed == 0 ? 0 : totalRefreshTime / numExecutedRefreshed;
  stopped = true;
}
public void testPrimaryRelocation() throws Exception {
  // Verifies that a shadow-replica primary can be relocated off its original
  // node (via allocation exclusion) without losing reads or writes.
  Path dataPath = createTempDir();
  Settings nodeSettings = nodeSettings(dataPath);
  String node1 = internalCluster().startNode(nodeSettings);
  String IDX = "test";
  Settings idxSettings =
      Settings.builder()
          .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
          .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
          .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
          .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
          .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
          .build();
  prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
  client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
  client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
  GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
  GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertTrue(gResp1.isExists());
  assertTrue(gResp2.isExists());
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  // Node1 has the primary, now node2 has the replica
  String node2 = internalCluster().startNode(nodeSettings);
  ensureGreen(IDX);
  client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
  flushAndRefresh(IDX);
  // now prevent primary from being allocated on node 1, forcing a move to node3
  String node3 = internalCluster().startNode(nodeSettings);
  Settings build =
      Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
  client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
  ensureGreen(IDX);
  logger.info("--> performing query");
  SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
  assertHitCount(resp, 2);
  // Pre-relocation documents must still be readable after the primary moved.
  gResp1 = client().prepareGet(IDX, "doc", "1").get();
  gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertTrue(gResp1.isExists());
  assertTrue(gResp2.toString(), gResp2.isExists());
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  // The relocated primary must also accept and serve new writes.
  client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
  client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
  gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").get();
  gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").get();
  assertTrue(gResp1.isExists());
  assertTrue(gResp2.isExists());
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
}
public void testReplicaToPrimaryPromotion() throws Exception {
  // Verifies that when the node holding the primary is stopped, the shadow
  // replica is promoted to primary and continues to serve reads and writes.
  Path dataPath = createTempDir();
  Settings nodeSettings = nodeSettings(dataPath);
  String node1 = internalCluster().startNode(nodeSettings);
  String IDX = "test";
  Settings idxSettings =
      Settings.builder()
          .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
          .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
          .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
          .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
          .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
          .build();
  prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
  client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
  client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
  GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
  GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertTrue(gResp1.isExists());
  assertTrue(gResp2.isExists());
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  // Node1 has the primary, now node2 has the replica
  String node2 = internalCluster().startNode(nodeSettings);
  ensureGreen(IDX);
  client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
  flushAndRefresh(IDX);
  // Stop the node holding the primary; the replica on node2 must be promoted.
  logger.info("--> stopping node1 [{}]", node1);
  internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
  ensureYellow(IDX);
  logger.info("--> performing query");
  SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
  assertHitCount(resp, 2);
  gResp1 = client().prepareGet(IDX, "doc", "1").get();
  gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertTrue(gResp1.isExists());
  assertTrue(gResp2.toString(), gResp2.isExists());
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  // The promoted primary must accept updates to existing documents.
  client().prepareIndex(IDX, "doc", "1").setSource("foo", "foobar").get();
  client().prepareIndex(IDX, "doc", "2").setSource("foo", "foobar").get();
  gResp1 = client().prepareGet(IDX, "doc", "1").get();
  gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertTrue(gResp1.isExists());
  assertTrue(gResp2.toString(), gResp2.isExists());
  assertThat(gResp1.getSource().get("foo"), equalTo("foobar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("foobar"));
}
@Test
public void testGetSlash() {
  // A document id containing a slash must round-trip through index + get intact.
  final String id = "get-test/id-2";
  restClient()
      .prepareIndex(index, type, id)
      .setSource("field", "value")
      .setRefresh(true)
      .execute()
      .actionGet();
  final GetResponse get = restClient().prepareGet(index, type, id).execute().actionGet();
  assertEquals(get.getIndex(), index);
  assertEquals(get.getType(), type);
  assertEquals(get.getId(), id);
  assertEquals(get.getVersion(), 1);
  assertTrue(get.getFields().isEmpty());
  assertEquals(get.getSource().get("field"), "value");
}
/**
 * Reconfigures the river from its "_meta" configuration document.
 *
 * <p>The river must be stopped (closed) before calling this; otherwise an
 * {@link IllegalStateException} is thrown. Also throws if the configuration
 * document does not exist.
 */
public synchronized void reconfigure() {
  if (!closed) {
    throw new IllegalStateException("Remote River must be stopped to reconfigure it!");
  }
  logger.info("reconfiguring Remote River");
  String configIndex = getRiverIndexName();
  // Refresh first so we read the most recently written configuration.
  refreshSearchIndex(configIndex);
  GetResponse configDoc =
      client.prepareGet(configIndex, riverName().name(), "_meta").execute().actionGet();
  if (!configDoc.isExists()) {
    throw new IllegalStateException(
        "Configuration document not found to reconfigure remote river " + riverName().name());
  }
  if (logger.isDebugEnabled()) {
    logger.debug("Configuration document: {}", configDoc.getSourceAsString());
  }
  Map<String, Object> newset = configDoc.getSource();
  configure(newset);
}
@CheckForNull
@Override
public DOMAIN getNullableByKey(KEY key) {
  // Fetches a single document by key, routing by the same key value so the get
  // hits the owning shard. Returns null when the document does not exist.
  // Hoisted the previously duplicated getKeyValue(key) call into a local
  // (assumes getKeyValue is a pure mapping of key -> id; confirm if it ever
  // gains side effects).
  String keyValue = this.getKeyValue(key);
  GetRequestBuilder request =
      client
          .prepareGet()
          .setType(this.getIndexType())
          .setIndex(this.getIndexName())
          .setId(keyValue)
          .setFetchSource(true)
          .setRouting(keyValue);
  GetResponse response = request.get();
  if (response.isExists()) {
    return toDoc(response.getSource());
  }
  return null;
}
@Override
public Article get(long id) {
  // Look up an article by numeric id; null when it is not indexed.
  GetResponse getResponse =
      transportClient.prepareGet(INDICE, TYPE, String.valueOf(id)).execute().actionGet();
  if (!getResponse.isExists()) {
    return null;
  }
  Map<String, Object> source = getResponse.getSource();
  String idStr = getResponse.getId();
  logger.debug("get id : {}", idStr);
  Article article = new Article();
  article.setId(Long.parseLong(idStr));
  article.setTitle((String) source.get(TITLE));
  article.setSummary((String) source.get(SUMMARY));
  return article;
}
@Override
public void run() {
  // Worker loop: repeatedly reads existing child documents (including their
  // _parent field) and re-indexes each under a new id ("<childId>_<j>"),
  // producing deleted-doc churn that the periodic stats printout observes.
  // Runs until the `run` flag is cleared.
  while (run) {
    int childIdLimit = PARENT_COUNT * NUM_CHILDREN_PER_PARENT;
    for (int childId = 1; run && childId < childIdLimit; ) {
      try {
        for (int j = 0; j < 8; j++) {
          // ++childId advances the cursor as a side effect of building the id.
          GetResponse getResponse =
              client
                  .prepareGet(indexName, "child", String.valueOf(++childId))
                  .setFields("_source", "_parent")
                  .setRouting("1") // Doesn't matter what value, since there is only one shard
                  .get();
          client
              .prepareIndex(indexName, "child", Integer.toString(childId) + "_" + j)
              .setParent(getResponse.getField("_parent").getValue().toString())
              .setSource(getResponse.getSource())
              .get();
        }
        client.admin().indices().prepareRefresh(indexName).execute().actionGet();
        Thread.sleep(1000);
        if (childId % 500 == 0) {
          // Periodically report how many deleted docs the churn has produced.
          NodesStatsResponse statsResponse =
              client
                  .admin()
                  .cluster()
                  .prepareNodesStats()
                  .clear()
                  .setIndices(true)
                  .execute()
                  .actionGet();
          System.out.println(
              "Deleted docs: " + statsResponse.getAt(0).getIndices().getDocs().getDeleted());
        }
      } catch (Throwable e) {
        // NOTE(review): this also swallows InterruptedException without
        // restoring the interrupt flag; acceptable for a benchmark harness,
        // but worth confirming.
        e.printStackTrace();
      }
    }
  }
}
@SuppressWarnings("unchecked")
@Test
public void updateIncludeExclude() throws Exception {
  // Verifies that updating _source includes/excludes on a mapping only affects
  // documents indexed AFTER the change, and that later mapping updates merge
  // with (rather than replace) existing includes/excludes.
  assertAcked(
      prepareCreate("test")
          .addMapping(
              "type",
              jsonBuilder()
                  .startObject()
                  .startObject("type")
                  .startObject("properties")
                  .startObject("normal")
                  .field("type", "long")
                  .endObject()
                  .startObject("exclude")
                  .field("type", "long")
                  .endObject()
                  .startObject("include")
                  .field("type", "long")
                  .endObject()
                  .endObject()
                  .endObject()
                  .endObject()));
  // make sure that replicas are initialized so the refresh command will work on them too
  ensureGreen();
  logger.info("Index doc");
  index(
      "test",
      "type",
      "1",
      JsonXContent.contentBuilder()
          .startObject()
          .field("normal", 1)
          .field("exclude", 1)
          .field("include", 1)
          .endObject());
  refresh(); // commit it for later testing.
  logger.info("Adding exclude settings");
  PutMappingResponse putResponse =
      client()
          .admin()
          .indices()
          .preparePutMapping("test")
          .setType("type")
          .setSource(
              JsonXContent.contentBuilder()
                  .startObject()
                  .startObject("type")
                  .startObject("_source")
                  .startArray("excludes")
                  .value("exclude")
                  .endArray()
                  .endObject()
                  .endObject())
          .get();
  assertTrue(putResponse.isAcknowledged());
  // changed mapping doesn't affect indexed documents (checking backward compatibility)
  GetResponse getResponse = client().prepareGet("test", "type", "1").setRealtime(false).get();
  assertThat(getResponse.getSource(), hasKey("normal"));
  assertThat(getResponse.getSource(), hasKey("exclude"));
  assertThat(getResponse.getSource(), hasKey("include"));
  logger.info("Index doc again");
  index(
      "test",
      "type",
      "1",
      JsonXContent.contentBuilder()
          .startObject()
          .field("normal", 2)
          .field("exclude", 1)
          .field("include", 2)
          .endObject());
  // but do affect newly indexed docs
  getResponse = get("test", "type", "1");
  assertThat(getResponse.getSource(), hasKey("normal"));
  assertThat(getResponse.getSource(), not(hasKey("exclude")));
  assertThat(getResponse.getSource(), hasKey("include"));
  logger.info("Changing mapping to includes");
  putResponse =
      client()
          .admin()
          .indices()
          .preparePutMapping("test")
          .setType("type")
          .setSource(
              JsonXContent.contentBuilder()
                  .startObject()
                  .startObject("type")
                  .startObject("_source")
                  .startArray("excludes")
                  .endArray()
                  .startArray("includes")
                  .value("include")
                  .endArray()
                  .endObject()
                  .endObject())
          .get();
  assertTrue(putResponse.isAcknowledged());
  // The merged mapping should now carry the "include" entry and an empty excludes list.
  GetMappingsResponse getMappingsResponse =
      client().admin().indices().prepareGetMappings("test").get();
  MappingMetaData typeMapping = getMappingsResponse.getMappings().get("test").get("type");
  assertThat(
      (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes"));
  ArrayList<String> includes =
      (ArrayList<String>)
          ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes");
  assertThat(includes, contains("include"));
  assertThat(
      (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes"));
  assertThat(
      (ArrayList<String>)
          ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes"),
      emptyIterable());
  logger.info("Indexing doc yet again");
  index(
      "test",
      "type",
      "1",
      JsonXContent.contentBuilder()
          .startObject()
          .field("normal", 3)
          .field("exclude", 3)
          .field("include", 3)
          .endObject());
  // With an active includes list, only "include" should survive in _source.
  getResponse = get("test", "type", "1");
  assertThat(getResponse.getSource(), not(hasKey("normal")));
  assertThat(getResponse.getSource(), not(hasKey("exclude")));
  assertThat(getResponse.getSource(), hasKey("include"));
  logger.info("Adding excludes, but keep includes");
  putResponse =
      client()
          .admin()
          .indices()
          .preparePutMapping("test")
          .setType("type")
          .setSource(
              JsonXContent.contentBuilder()
                  .startObject()
                  .startObject("type")
                  .startObject("_source")
                  .startArray("excludes")
                  .value("*.excludes")
                  .endArray()
                  .endObject()
                  .endObject())
          .get();
  assertTrue(putResponse.isAcknowledged());
  // Adding an exclude must merge with, not replace, the existing includes.
  getMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
  typeMapping = getMappingsResponse.getMappings().get("test").get("type");
  assertThat(
      (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("includes"));
  includes =
      (ArrayList<String>)
          ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("includes");
  assertThat(includes, contains("include"));
  assertThat(
      (Map<String, Object>) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes"));
  ArrayList<String> excludes =
      (ArrayList<String>)
          ((Map<String, Object>) typeMapping.getSourceAsMap().get("_source")).get("excludes");
  assertThat(excludes, contains("*.excludes"));
}
@TestLogging("org.elasticsearch.gateway:TRACE")
public void testIndexWithFewDocuments() throws Exception {
  // Exercises translog replay for a shadow-replica index: indexes documents,
  // checks translog stats on every shard, then restarts the cluster and
  // verifies all documents survive recovery.
  final Path dataPath = createTempDir();
  Settings nodeSettings = nodeSettings(dataPath);
  internalCluster().startNodesAsync(3, nodeSettings).get();
  final String IDX = "test";
  // A huge (1 PB) flush threshold keeps operations in the translog so restart
  // recovery is forced to replay them.
  Settings idxSettings =
      Settings.builder()
          .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
          .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
          .put(
              IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
              new ByteSizeValue(1, ByteSizeUnit.PB))
          .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
          .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
          .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
          .build();
  prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
  ensureGreen(IDX);
  // So basically, the primary should fail and the replica will need to
  // replay the translog, this is what this tests
  client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
  client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
  IndicesStatsResponse indicesStatsResponse =
      client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get();
  assertEquals(
      2,
      indicesStatsResponse
          .getIndex(IDX)
          .getPrimaries()
          .getTranslog()
          .estimatedNumberOfOperations());
  assertEquals(
      2,
      indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations());
  Index index = resolveIndex(IDX);
  for (IndicesService service : internalCluster().getInstances(IndicesService.class)) {
    IndexService indexService = service.indexService(index);
    if (indexService != null) {
      IndexShard shard = indexService.getShard(0);
      TranslogStats translogStats = shard.translogStats();
      // Shadow shards may legitimately report no translog stats of their own.
      assertTrue(translogStats != null || shard instanceof ShadowIndexShard);
      if (translogStats != null) {
        assertEquals(2, translogStats.estimatedNumberOfOperations());
      }
    }
  }
  // Check that we can get doc 1 and 2, because we are doing realtime
  // gets and getting from the primary
  GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
  GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  flushAndRefresh(IDX);
  client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
  client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
  refresh();
  // Check that we can get doc 1 and 2 without realtime
  gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get();
  gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get();
  assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
  assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
  logger.info("--> restarting all nodes");
  if (randomBoolean()) {
    logger.info("--> rolling restart");
    internalCluster().rollingRestart();
  } else {
    logger.info("--> full restart");
    internalCluster().fullRestart();
  }
  client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
  ensureGreen(IDX);
  flushAndRefresh(IDX);
  logger.info("--> performing query");
  // All four documents must have survived the restart (translog replay).
  SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
  assertHitCount(resp, 4);
  logger.info("--> deleting index");
  assertAcked(client().admin().indices().prepareDelete(IDX));
}
@Test
@Slow
public void testConcurrentUpdateWithRetryOnConflict() throws Exception {
  // Hammers the same set of documents from several threads. With
  // retryOnConflict=MAX_VALUE every update must eventually succeed, so each
  // document's version and counter end up exactly equal to the thread count.
  final boolean useBulkApi = randomBoolean();
  createIndex();
  ensureGreen();
  int numberOfThreads = scaledRandomIntBetween(2, 5);
  final CountDownLatch latch = new CountDownLatch(numberOfThreads);
  final CountDownLatch startLatch = new CountDownLatch(1);
  final int numberOfUpdatesPerThread = scaledRandomIntBetween(100, 10000);
  final List<Throwable> failures = new CopyOnWriteArrayList<>();
  for (int i = 0; i < numberOfThreads; i++) {
    Runnable r =
        new Runnable() {
          @Override
          public void run() {
            try {
              // Line all threads up before the contention starts.
              startLatch.await();
              for (int i = 0; i < numberOfUpdatesPerThread; i++) {
                if (useBulkApi) {
                  UpdateRequestBuilder updateRequestBuilder =
                      client()
                          .prepareUpdate("test", "type1", Integer.toString(i))
                          .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
                          .setRetryOnConflict(Integer.MAX_VALUE)
                          .setUpsert(jsonBuilder().startObject().field("field", 1).endObject());
                  client().prepareBulk().add(updateRequestBuilder).execute().actionGet();
                } else {
                  client()
                      .prepareUpdate("test", "type1", Integer.toString(i))
                      .setScript("ctx._source.field += 1", ScriptService.ScriptType.INLINE)
                      .setRetryOnConflict(Integer.MAX_VALUE)
                      .setUpsert(jsonBuilder().startObject().field("field", 1).endObject())
                      .execute()
                      .actionGet();
                }
              }
            } catch (Throwable e) {
              failures.add(e);
            } finally {
              latch.countDown();
            }
          }
        };
    new Thread(r).start();
  }
  startLatch.countDown();
  latch.await();
  for (Throwable throwable : failures) {
    logger.info("Captured failure on concurrent update:", throwable);
  }
  assertThat(failures.size(), equalTo(0));
  // Every document must have been updated exactly once per thread.
  for (int i = 0; i < numberOfUpdatesPerThread; i++) {
    GetResponse response =
        client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet();
    assertThat(response.getId(), equalTo(Integer.toString(i)));
    assertThat(response.isExists(), equalTo(true));
    assertThat(response.getVersion(), equalTo((long) numberOfThreads));
    assertThat((Integer) response.getSource().get("field"), equalTo(numberOfThreads));
  }
}
@Test
public void testVersionedUpdate() throws Exception {
  // Exercises the update API's interaction with internal, external and force
  // versioning, including upserts and delete-then-update behavior.
  createIndex("test");
  ensureGreen();
  index("test", "type", "1", "text", "value"); // version is now 1
  // Wrong expected version must be rejected.
  assertThrows(
      client()
          .prepareUpdate("test", "type", "1")
          .setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
          .setVersion(2)
          .execute(),
      VersionConflictEngineException.class);
  client()
      .prepareUpdate("test", "type", "1")
      .setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
      .setVersion(1)
      .get();
  assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(2l));
  // and again with a higher version..
  client()
      .prepareUpdate("test", "type", "1")
      .setScript("ctx._source.text = 'v3'", ScriptService.ScriptType.INLINE)
      .setVersion(2)
      .get();
  assertThat(client().prepareGet("test", "type", "1").get().getVersion(), equalTo(3l));
  // after delete
  client().prepareDelete("test", "type", "1").get();
  assertThrows(
      client()
          .prepareUpdate("test", "type", "1")
          .setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
          .setVersion(3)
          .execute(),
      DocumentMissingException.class);
  // external versioning: updates with EXTERNAL version type are invalid.
  client()
      .prepareIndex("test", "type", "2")
      .setSource("text", "value")
      .setVersion(10)
      .setVersionType(VersionType.EXTERNAL)
      .get();
  assertThrows(
      client()
          .prepareUpdate("test", "type", "2")
          .setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
          .setVersion(2)
          .setVersionType(VersionType.EXTERNAL)
          .execute(),
      ActionRequestValidationException.class);
  // upserts - the combination with versions is a bit weird. Tests are here to ensure we do not
  // change our behavior unintentionally.
  // With internal versions, it means "if object is there with version X, update it or explode;
  // if it is not there, index."
  client()
      .prepareUpdate("test", "type", "3")
      .setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
      .setVersion(10)
      .setUpsert("{ \"text\": \"v0\" }")
      .get();
  GetResponse get = get("test", "type", "3");
  assertThat(get.getVersion(), equalTo(1l));
  assertThat((String) get.getSource().get("text"), equalTo("v0"));
  // With force version
  client()
      .prepareUpdate("test", "type", "4")
      .setScript("ctx._source.text = 'v2'", ScriptService.ScriptType.INLINE)
      .setVersion(10)
      .setVersionType(VersionType.FORCE)
      .setUpsert("{ \"text\": \"v0\" }")
      .get();
  get = get("test", "type", "4");
  assertThat(get.getVersion(), equalTo(10l));
  assertThat((String) get.getSource().get("text"), equalTo("v0"));
  // retry on conflict is rejected:
  assertThrows(
      client().prepareUpdate("test", "type", "1").setVersion(10).setRetryOnConflict(5),
      ActionRequestValidationException.class);
}