public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
  final SearchContext context = findContext(request.id());
  contextProcessing(context);
  try {
    final IndexCache indexCache = context.indexShard().indexService().cache();
    context
        .searcher()
        .dfSource(
            new CachedDfSource(
                context.searcher().getIndexReader(),
                request.dfs(),
                context.similarityService().similarity(),
                indexCache.filter(),
                indexCache.filterPolicy()));
  } catch (Throwable e) {
    freeContext(context.id());
    cleanContext(context);
    throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
  }
  try {
    ShardSearchStats shardSearchStats = context.indexShard().searchService();
    shardSearchStats.onPreQueryPhase(context);
    long time = System.nanoTime();
    try {
      queryPhase.execute(context);
    } catch (Throwable e) {
      shardSearchStats.onFailedQueryPhase(context);
      throw ExceptionsHelper.convertToRuntime(e);
    }
    long time2 = System.nanoTime();
    shardSearchStats.onQueryPhase(context, time2 - time);
    shardSearchStats.onPreFetchPhase(context);
    try {
      shortcutDocIdsToLoad(context);
      fetchPhase.execute(context);
      if (context.scroll() == null) {
        freeContext(request.id());
      } else {
        contextProcessedSuccessfully(context);
      }
    } catch (Throwable e) {
      shardSearchStats.onFailedFetchPhase(context);
      throw ExceptionsHelper.convertToRuntime(e);
    }
    shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
    return new QueryFetchSearchResult(context.queryResult(), context.fetchResult());
  } catch (Throwable e) {
    logger.trace("Fetch phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    cleanContext(context);
  }
}
private void doFinish() {
  if (finished.compareAndSet(false, true)) {
    Releasables.close(indexShardReference);
    final ShardId shardId = shardIt.shardId();
    final ActionWriteResponse.ShardInfo.Failure[] failuresArray;
    if (!shardReplicaFailures.isEmpty()) {
      int slot = 0;
      failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()];
      for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
        RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
        failuresArray[slot++] =
            new ActionWriteResponse.ShardInfo.Failure(
                shardId.getIndex(),
                shardId.getId(),
                entry.getKey(),
                entry.getValue(),
                restStatus,
                false);
      }
    } else {
      failuresArray = ActionWriteResponse.EMPTY;
    }
    finalResponse.setShardInfo(
        new ActionWriteResponse.ShardInfo(totalShards, success.get(), failuresArray));
    listener.onResponse(finalResponse);
  }
}
/**
 * Helper for unzipping downloaded zips.
 *
 * @param environment the node environment used to resolve the bin and plugins locations
 * @param zipFile the downloaded zip archive to extract
 * @param targetFile the directory to extract into
 * @param targetPath the plugin path used when relocating bin and _site content
 * @throws IOException if the archive cannot be read
 */
private void unzip(Environment environment, ZipFile zipFile, File targetFile, String targetPath)
    throws IOException {
  String baseDirSuffix = null;
  try {
    Enumeration<? extends ZipEntry> zipEntries = zipFile.entries();
    if (!zipEntries.hasMoreElements()) {
      logger.error("the zip archive has no entries");
      // nothing to extract; calling nextElement() here would throw NoSuchElementException
      return;
    }
    ZipEntry firstEntry = zipEntries.nextElement();
    if (firstEntry.isDirectory()) {
      baseDirSuffix = firstEntry.getName();
    } else {
      zipEntries = zipFile.entries();
    }
    while (zipEntries.hasMoreElements()) {
      ZipEntry zipEntry = zipEntries.nextElement();
      if (zipEntry.isDirectory()) {
        continue;
      }
      String zipEntryName = zipEntry.getName();
      zipEntryName = zipEntryName.replace('\\', '/');
      if (baseDirSuffix != null && zipEntryName.startsWith(baseDirSuffix)) {
        zipEntryName = zipEntryName.substring(baseDirSuffix.length());
      }
      File target = new File(targetFile, zipEntryName);
      FileSystemUtils.mkdirs(target.getParentFile());
      Streams.copy(zipFile.getInputStream(zipEntry), new FileOutputStream(target));
    }
  } catch (IOException e) {
    logger.error(
        "failed to extract zip ["
            + zipFile.getName()
            + "]: "
            + ExceptionsHelper.detailedMessage(e));
    return;
  } finally {
    try {
      zipFile.close();
    } catch (IOException e) {
      // ignore
    }
  }
  File binFile = new File(targetFile, "bin");
  if (binFile.exists() && binFile.isDirectory()) {
    File toLocation = new File(new File(environment.homeFile(), "bin"), targetPath);
    logger.info("found bin, moving to " + toLocation.getAbsolutePath());
    FileSystemUtils.deleteRecursively(toLocation);
    binFile.renameTo(toLocation);
  }
  if (!new File(targetFile, "_site").exists()) {
    if (!FileSystemUtils.hasExtensions(targetFile, ".class", ".jar")) {
      logger.info("identified as a _site plugin, moving to _site structure ...");
      File site = new File(targetFile, "_site");
      File tmpLocation = new File(environment.pluginsFile(), targetPath + ".tmp");
      targetFile.renameTo(tmpLocation);
      FileSystemUtils.mkdirs(targetFile);
      tmpLocation.renameTo(site);
    }
  }
}
public void testUnsupportedFeatures() throws IOException {
  XContentBuilder mapping =
      XContentBuilder.builder(JsonXContent.jsonXContent)
          .startObject()
          .startObject("type")
          .startObject(FieldNamesFieldMapper.NAME)
          // by randomly setting index to "no" we also test the pre-1.3 behavior
          .field("index", randomFrom("no", "not_analyzed"))
          .field("store", randomFrom("no", "yes"))
          .endObject()
          .endObject()
          .endObject();
  try {
    assertAcked(
        prepareCreate("test")
            .setSettings(
                Settings.builder()
                    .put(
                        "index.routing.allocation.exclude._name",
                        backwardsCluster().newNodePattern())
                    .put(indexSettings()))
            .addMapping("type", mapping));
  } catch (MapperParsingException ex) {
    assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class));
    assertThat(
        ExceptionsHelper.detailedMessage(ex)
            .contains(
                "type=_field_names is not supported on indices created before version 1.3.0"),
        equalTo(true));
  }
}
public FetchSearchResult executeFetchPhase(ShardFetchRequest request) {
  final SearchContext context = findContext(request.id());
  contextProcessing(context);
  final ShardSearchStats shardSearchStats = context.indexShard().searchService();
  try {
    if (request.lastEmittedDoc() != null) {
      context.lastEmittedDoc(request.lastEmittedDoc());
    }
    context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
    shardSearchStats.onPreFetchPhase(context);
    long time = System.nanoTime();
    fetchPhase.execute(context);
    if (context.scroll() == null) {
      freeContext(request.id());
    } else {
      contextProcessedSuccessfully(context);
    }
    shardSearchStats.onFetchPhase(context, System.nanoTime() - time);
    return context.fetchResult();
  } catch (Throwable e) {
    shardSearchStats.onFailedFetchPhase(context);
    logger.trace("Fetch phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    cleanContext(context);
  }
}
public void testDisabledUpdateIndexedScriptsOnly() {
  assertAcked(
      client()
          .admin()
          .cluster()
          .preparePutStoredScript()
          .setScriptLang(GroovyScriptEngineService.NAME)
          .setId("script1")
          .setSource(new BytesArray("{\"script\":\"2\"}")));
  client().prepareIndex("test", "scriptTest", "1").setSource("{\"theField\":\"foo\"}").get();
  try {
    client()
        .prepareUpdate("test", "scriptTest", "1")
        .setScript(
            new Script(
                "script1", ScriptService.ScriptType.STORED, GroovyScriptEngineService.NAME, null))
        .get();
    fail("update script should have been rejected");
  } catch (Exception e) {
    assertThat(e.getMessage(), containsString("failed to execute script"));
    assertThat(
        ExceptionsHelper.detailedMessage(e),
        containsString(
            "scripts of type [stored], operation [update] and lang [groovy] are disabled"));
  }
}
public QuerySearchResult executeScan(ShardSearchRequest request) {
  final SearchContext context = createAndPutContext(request);
  final int originalSize = context.size();
  try {
    if (context.aggregations() != null) {
      throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
    }
    if (context.scroll() == null) {
      throw new ElasticsearchException("Scroll must be provided when scanning...");
    }
    assert context.searchType() == SearchType.SCAN;
    // move to QUERY_THEN_FETCH, and then, when scrolling, move to SCAN
    context.searchType(SearchType.QUERY_THEN_FETCH);
    context.size(0); // set size to 0 so that we only count matches
    assert context.searchType() == SearchType.QUERY_THEN_FETCH;
    contextProcessing(context);
    queryPhase.execute(context);
    contextProcessedSuccessfully(context);
    return context.queryResult();
  } catch (Throwable e) {
    logger.trace("Scan phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    context.size(originalSize);
    cleanContext(context);
  }
}
private Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> buildShardStores(
    DiscoveryNodes nodes, MutableShardRouting shard) {
  Map<DiscoveryNode, TransportNodesListShardStoreMetaData.StoreFilesMetaData> shardStores =
      cachedStores.get(shard.shardId());
  ObjectOpenHashSet<String> nodesIds;
  if (shardStores == null) {
    shardStores = Maps.newHashMap();
    cachedStores.put(shard.shardId(), shardStores);
    nodesIds = ObjectOpenHashSet.from(nodes.dataNodes().keys());
  } else {
    nodesIds = ObjectOpenHashSet.newInstance();
    // clean nodes that have failed
    for (Iterator<DiscoveryNode> it = shardStores.keySet().iterator(); it.hasNext(); ) {
      DiscoveryNode node = it.next();
      if (!nodes.nodeExists(node.id())) {
        it.remove();
      }
    }
    for (ObjectCursor<DiscoveryNode> cursor : nodes.dataNodes().values()) {
      DiscoveryNode node = cursor.value;
      if (!shardStores.containsKey(node)) {
        nodesIds.add(node.id());
      }
    }
  }
  if (!nodesIds.isEmpty()) {
    String[] nodesIdsArray = nodesIds.toArray(String.class);
    TransportNodesListShardStoreMetaData.NodesStoreFilesMetaData nodesStoreFilesMetaData =
        listShardStoreMetaData.list(shard.shardId(), false, nodesIdsArray, listTimeout).actionGet();
    if (logger.isTraceEnabled()) {
      if (nodesStoreFilesMetaData.failures().length > 0) {
        StringBuilder sb =
            new StringBuilder(shard + ": failures when trying to list stores on nodes:");
        for (int i = 0; i < nodesStoreFilesMetaData.failures().length; i++) {
          Throwable cause = ExceptionsHelper.unwrapCause(nodesStoreFilesMetaData.failures()[i]);
          if (cause instanceof ConnectTransportException) {
            continue;
          }
          sb.append("\n -> ").append(nodesStoreFilesMetaData.failures()[i].getDetailedMessage());
        }
        logger.trace(sb.toString());
      }
    }
    for (TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData nodeStoreFilesMetaData :
        nodesStoreFilesMetaData) {
      if (nodeStoreFilesMetaData.storeFilesMetaData() != null) {
        shardStores.put(
            nodeStoreFilesMetaData.getNode(), nodeStoreFilesMetaData.storeFilesMetaData());
      }
    }
  }
  return shardStores;
}
public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) {
  final SearchContext context = findContext(request.id());
  contextProcessing(context);
  try {
    processScroll(request, context);
    if (context.searchType() == SearchType.QUERY_THEN_FETCH) {
      // first scanning, reset the from to 0
      context.searchType(SearchType.SCAN);
      context.from(0);
    }
    queryPhase.execute(context);
    shortcutDocIdsToLoadForScanning(context);
    fetchPhase.execute(context);
    if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
      freeContext(request.id());
    } else {
      contextProcessedSuccessfully(context);
    }
    return new ScrollQueryFetchSearchResult(
        new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
        context.shardTarget());
  } catch (Throwable e) {
    logger.trace("Scan phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    cleanContext(context);
  }
}
@Override
public Query visitFunction(Function function, Context context) {
  assert function != null;
  if (fieldIgnored(function, context)) {
    return Queries.newMatchAllQuery();
  }
  function = rewriteAndValidateFields(function, context);
  FunctionToQuery toQuery = functions.get(function.info().ident().name());
  if (toQuery == null) {
    return genericFunctionQuery(function, context);
  }
  Query query;
  try {
    query = toQuery.apply(function, context);
  } catch (IOException e) {
    throw ExceptionsHelper.convertToRuntime(e);
  } catch (UnsupportedOperationException e) {
    return genericFunctionQuery(function, context);
  }
  if (query == null) {
    query = queryFromInnerFunction(function, context);
    if (query == null) {
      return genericFunctionQuery(function, context);
    }
  }
  return query;
}
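// The visitor above, like several methods in this section, funnels checked
// IOExceptions through ExceptionsHelper.convertToRuntime(...) so callers need
// no throws clause. A minimal, self-contained sketch of that pattern (stand-in
// code, assuming the helper passes RuntimeExceptions through unchanged and
// wraps everything else; the real helper wraps into an Elasticsearch-specific
// exception type rather than a plain RuntimeException):
final class RuntimeConversion {
  private RuntimeConversion() {}

  static RuntimeException convertToRuntime(Throwable t) {
    if (t instanceof RuntimeException) {
      return (RuntimeException) t; // already unchecked, rethrow as-is
    }
    return new RuntimeException(t); // wrap checked exceptions such as IOException
  }
}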
public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) {
  final SearchContext context = createAndPutContext(request);
  final ShardSearchStats shardSearchStats = context.indexShard().searchService();
  try {
    shardSearchStats.onPreQueryPhase(context);
    long time = System.nanoTime();
    contextProcessing(context);
    loadOrExecuteQueryPhase(request, context, queryPhase);
    if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
      freeContext(context.id());
    } else {
      contextProcessedSuccessfully(context);
    }
    shardSearchStats.onQueryPhase(context, System.nanoTime() - time);
    return context.queryResult();
  } catch (Throwable e) {
    // execution exception can happen while loading the cache, strip it
    if (e instanceof ExecutionException) {
      e = e.getCause();
    }
    shardSearchStats.onFailedQueryPhase(context);
    logger.trace("Query phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    cleanContext(context);
  }
}
/** Should an exception be ignored when the operation is performed on the replica. */
boolean ignoreReplicaException(Throwable e) {
  Throwable cause = ExceptionsHelper.unwrapCause(e);
  if (cause instanceof IllegalIndexShardStateException) {
    return true;
  }
  if (cause instanceof IndexMissingException) {
    return true;
  }
  if (cause instanceof IndexShardMissingException) {
    return true;
  }
  if (cause instanceof ConnectTransportException) {
    return true;
  }
  // on version conflict or document missing, it means
  // that a newer change has crept into the replica, and it's fine
  if (cause instanceof VersionConflictEngineException) {
    return true;
  }
  // same here
  if (cause instanceof DocumentAlreadyExistsException) {
    return true;
  }
  return false;
}
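// ignoreReplicaException only works because ExceptionsHelper.unwrapCause(...)
// first strips transport-level wrappers so the instanceof checks see the real
// failure rather than, say, a RemoteTransportException envelope. A
// self-contained sketch of that unwrapping loop, using a hypothetical
// WrapperException marker interface in place of Elasticsearch's
// ElasticsearchWrapperException:
interface WrapperException {} // marker: "my cause is the interesting exception"

final class Causes {
  private Causes() {}

  static Throwable unwrapCause(Throwable t) {
    Throwable result = t;
    int hops = 0;
    // follow causes only through wrapper exceptions, with guards against
    // null and self-referential causes plus a depth limit for long chains
    while (result instanceof WrapperException
        && result.getCause() != null
        && result.getCause() != result
        && hops++ < 10) {
      result = result.getCause();
    }
    return result;
  }
}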
protected IndexResponse indexItem(
    DocTableInfo tableInfo,
    ShardUpsertRequest request,
    ShardUpsertRequest.Item item,
    ShardId shardId,
    boolean tryInsertFirst,
    int retryCount)
    throws ElasticsearchException {
  try {
    IndexRequest indexRequest;
    if (tryInsertFirst) {
      // try insert first without fetching the document
      try {
        indexRequest = new IndexRequest(prepareInsert(tableInfo, request, item), request);
      } catch (IOException e) {
        throw ExceptionsHelper.convertToElastic(e);
      }
    } else {
      indexRequest = new IndexRequest(prepareUpdate(tableInfo, request, item, shardId), request);
    }
    return indexAction.execute(indexRequest).actionGet();
  } catch (Throwable t) {
    if (t instanceof VersionConflictEngineException && retryCount < item.retryOnConflict()) {
      return indexItem(tableInfo, request, item, shardId, false, retryCount + 1);
    } else if (tryInsertFirst
        && item.updateAssignments() != null
        && t instanceof DocumentAlreadyExistsException) {
      // insert failed, document already exists, try update
      return indexItem(tableInfo, request, item, shardId, false, 0);
    } else {
      throw t;
    }
  }
}
public void testIndexIntoDefaultMapping() throws Throwable {
  // 1. test implicit index creation
  try {
    client()
        .prepareIndex("index1", MapperService.DEFAULT_MAPPING, "1")
        .setSource("{")
        .execute()
        .get();
    fail();
  } catch (Throwable t) {
    if (t instanceof ExecutionException) {
      t = ((ExecutionException) t).getCause();
    }
    final Throwable throwable = ExceptionsHelper.unwrapCause(t);
    if (throwable instanceof IllegalArgumentException) {
      assertEquals(
          "It is forbidden to index into the default mapping [_default_]",
          throwable.getMessage());
    } else {
      throw t;
    }
  }
  // 2. already existing index
  IndexService indexService = createIndex("index2");
  try {
    client()
        .prepareIndex("index2", MapperService.DEFAULT_MAPPING, "2")
        .setSource()
        .execute()
        .get();
    fail();
  } catch (Throwable t) {
    if (t instanceof ExecutionException) {
      t = ((ExecutionException) t).getCause();
    }
    final Throwable throwable = ExceptionsHelper.unwrapCause(t);
    if (throwable instanceof IllegalArgumentException) {
      assertEquals(
          "It is forbidden to index into the default mapping [_default_]",
          throwable.getMessage());
    } else {
      throw t;
    }
  }
  assertFalse(indexService.mapperService().hasMapping(MapperService.DEFAULT_MAPPING));
}
@Override
protected boolean retryOnFailure(Throwable e) {
  e = ExceptionsHelper.unwrapCause(e);
  return e instanceof IllegalIndexShardStateException;
}
private UpdateResult shardUpdateOperation(
    ClusterState clusterState,
    BulkShardRequest bulkShardRequest,
    UpdateRequest updateRequest,
    IndexShard indexShard) {
  UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard);
  switch (translate.operation()) {
    case UPSERT:
    case INDEX:
      IndexRequest indexRequest = translate.action();
      try {
        WriteResult result =
            shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, false);
        return new UpdateResult(translate, indexRequest, result);
      } catch (Throwable t) {
        t = ExceptionsHelper.unwrapCause(t);
        boolean retry = false;
        if (t instanceof VersionConflictEngineException
            || (t instanceof DocumentAlreadyExistsException
                && translate.operation() == UpdateHelper.Operation.UPSERT)) {
          retry = true;
        }
        return new UpdateResult(translate, indexRequest, retry, t, null);
      }
    case DELETE:
      DeleteRequest deleteRequest = translate.action();
      try {
        WriteResult result = shardDeleteOperation(bulkShardRequest, deleteRequest, indexShard);
        return new UpdateResult(translate, deleteRequest, result);
      } catch (Throwable t) {
        t = ExceptionsHelper.unwrapCause(t);
        boolean retry = false;
        if (t instanceof VersionConflictEngineException) {
          retry = true;
        }
        return new UpdateResult(translate, deleteRequest, retry, t, null);
      }
    case NONE:
      UpdateResponse updateResponse = translate.action();
      indexShard.indexingService().noopUpdate(updateRequest.type());
      return new UpdateResult(translate, updateResponse);
    default:
      throw new IllegalStateException("Illegal update operation " + translate.operation());
  }
}
protected boolean isConflictException(Throwable e) {
  Throwable cause = ExceptionsHelper.unwrapCause(e);
  // on version conflict or document missing, it means
  // that a new change has crept into the replica, and it's fine
  return cause instanceof VersionConflictEngineException;
}
public void verify(
    String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
  final DiscoveryNodes discoNodes = clusterService.state().nodes();
  final DiscoveryNode localNode = discoNodes.localNode();
  final ObjectContainer<DiscoveryNode> masterAndDataNodes =
      discoNodes.masterAndDataNodes().values();
  final List<DiscoveryNode> nodes = newArrayList();
  for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
    DiscoveryNode node = cursor.value;
    Version version = node.getVersion();
    // Verification wasn't supported before v1.4.0 - no reason to send a verification request
    // to these nodes
    if (version != null && version.onOrAfter(Version.V_1_4_0)) {
      nodes.add(node);
    }
  }
  final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
  final AtomicInteger counter = new AtomicInteger(nodes.size());
  for (final DiscoveryNode node : nodes) {
    if (node.equals(localNode)) {
      try {
        doVerify(repository, verificationToken);
      } catch (Throwable t) {
        logger.warn("[{}] failed to verify repository", t, repository);
        errors.add(new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(t)));
      }
      if (counter.decrementAndGet() == 0) {
        finishVerification(listener, nodes, errors);
      }
    } else {
      transportService.sendRequest(
          node,
          ACTION_NAME,
          new VerifyNodeRepositoryRequest(repository, verificationToken),
          new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
            @Override
            public void handleResponse(TransportResponse.Empty response) {
              if (counter.decrementAndGet() == 0) {
                finishVerification(listener, nodes, errors);
              }
            }

            @Override
            public void handleException(TransportException exp) {
              errors.add(
                  new VerificationFailure(node.id(), ExceptionsHelper.detailedMessage(exp)));
              if (counter.decrementAndGet() == 0) {
                finishVerification(listener, nodes, errors);
              }
            }
          });
    }
  }
}
public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) {
  final SearchContext context = findContext(request.id());
  contextProcessing(context);
  try {
    ShardSearchStats shardSearchStats = context.indexShard().searchService();
    processScroll(request, context);
    shardSearchStats.onPreQueryPhase(context);
    long time = System.nanoTime();
    try {
      queryPhase.execute(context);
    } catch (Throwable e) {
      shardSearchStats.onFailedQueryPhase(context);
      throw ExceptionsHelper.convertToRuntime(e);
    }
    long time2 = System.nanoTime();
    shardSearchStats.onQueryPhase(context, time2 - time);
    shardSearchStats.onPreFetchPhase(context);
    try {
      shortcutDocIdsToLoad(context);
      fetchPhase.execute(context);
      if (context.scroll() == null) {
        freeContext(request.id());
      } else {
        contextProcessedSuccessfully(context);
      }
    } catch (Throwable e) {
      shardSearchStats.onFailedFetchPhase(context);
      throw ExceptionsHelper.convertToRuntime(e);
    }
    shardSearchStats.onFetchPhase(context, System.nanoTime() - time2);
    return new ScrollQueryFetchSearchResult(
        new QueryFetchSearchResult(context.queryResult(), context.fetchResult()),
        context.shardTarget());
  } catch (Throwable e) {
    logger.trace("Fetch phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    cleanContext(context);
  }
}
@Override
protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, int shardId)
    throws ElasticSearchException {
  IndexService indexService = indicesService.indexServiceSafe(request.index());
  IndexShard indexShard = indexService.shardSafe(shardId);
  if (request.refresh() && !request.realtime()) {
    indexShard.refresh(
        new Engine.Refresh("refresh_flag_mget").force(TransportGetAction.REFRESH_FORCE));
  }
  MultiGetShardResponse response = new MultiGetShardResponse();
  for (int i = 0; i < request.locations.size(); i++) {
    String type = request.types.get(i);
    String id = request.ids.get(i);
    String[] fields = request.fields.get(i);
    long version = request.versions.get(i);
    VersionType versionType = request.versionTypes.get(i);
    if (versionType == null) {
      versionType = VersionType.INTERNAL;
    }
    FetchSourceContext fetchSourceContext = request.fetchSourceContexts.get(i);
    try {
      GetResult getResult =
          indexShard
              .getService()
              .get(type, id, fields, request.realtime(), version, versionType, fetchSourceContext);
      response.add(request.locations.get(i), new GetResponse(getResult));
    } catch (Throwable t) {
      if (TransportActions.isShardNotAvailableException(t)) {
        throw (ElasticSearchException) t;
      } else {
        logger.debug(
            "[{}][{}] failed to execute multi_get for [{}]/[{}]",
            t,
            request.index(),
            shardId,
            type,
            id);
        response.add(
            request.locations.get(i),
            new MultiGetResponse.Failure(
                request.index(), type, id, ExceptionsHelper.detailedMessage(t)));
      }
    }
  }
  return response;
}
void sendFiles(
    Store store,
    StoreFileMetaData[] files,
    Function<StoreFileMetaData, OutputStream> outputStreamFactory)
    throws Exception {
  store.incRef();
  try {
    // send smallest files first
    ArrayUtil.timSort(files, (a, b) -> Long.compare(a.length(), b.length()));
    for (int i = 0; i < files.length; i++) {
      final StoreFileMetaData md = files[i];
      try (final IndexInput indexInput =
          store.directory().openInput(md.name(), IOContext.READONCE)) {
        // it's fine that only the indexInput is in the try-with-resources block: the copy
        // method handles exceptions during close correctly and doesn't hide the original
        // exception
        Streams.copy(
            new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md));
      } catch (Exception e) {
        final IOException corruptIndexException;
        if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
          if (store.checkIntegrityNoException(md) == false) {
            // we are corrupted on the primary -- fail!
            logger.warn("{} Corrupted file detected {} checksum mismatch", shardId, md);
            failEngine(corruptIndexException);
            throw corruptIndexException;
          } else {
            // corruption has happened on the way to the replica
            RemoteTransportException exception =
                new RemoteTransportException(
                    "File corruption occurred on recovery but checksums are ok", null);
            exception.addSuppressed(e);
            logger.warn(
                (org.apache.logging.log4j.util.Supplier<?>)
                    () ->
                        new ParameterizedMessage(
                            "{} Remote file corruption on node {}, recovering {}. local checksum OK",
                            shardId,
                            request.targetNode(),
                            md),
                corruptIndexException);
            throw exception;
          }
        } else {
          throw e;
        }
      }
    }
  } finally {
    store.decRef();
  }
}
public synchronized void clear() {
  parentIndexFieldData = null;
  List<Throwable> exceptions = new ArrayList<>(0);
  final Collection<IndexFieldDataCache> fieldDataCacheValues = fieldDataCaches.values();
  for (IndexFieldDataCache cache : fieldDataCacheValues) {
    try {
      cache.clear();
    } catch (Throwable t) {
      exceptions.add(t);
    }
  }
  fieldDataCacheValues.clear();
  ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions);
}
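// clear() above, and clearField() and cancel() below, all follow the same
// best-effort cleanup idiom: keep iterating when an element fails, collect
// every Throwable, and rethrow only after the loop, so one bad cache entry
// cannot prevent the others from being cleared. A self-contained sketch of
// what a helper like maybeThrowRuntimeAndSuppress can look like (stand-in
// code, not the actual ExceptionsHelper implementation):
final class Suppression {
  private Suppression() {}

  // Rethrow the first collected failure as a RuntimeException, attaching the
  // remaining ones via addSuppressed(); do nothing if the list is empty.
  static void maybeThrowRuntimeAndSuppress(java.util.List<? extends Throwable> exceptions) {
    if (exceptions.isEmpty()) {
      return; // nothing failed, nothing to throw
    }
    Throwable main = exceptions.get(0);
    for (int i = 1; i < exceptions.size(); i++) {
      main.addSuppressed(exceptions.get(i)); // keep the other failures visible
    }
    throw new RuntimeException(main.getMessage(), main);
  }
}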
public DfsSearchResult executeDfsPhase(ShardSearchRequest request) {
  final SearchContext context = createAndPutContext(request);
  try {
    contextProcessing(context);
    dfsPhase.execute(context);
    contextProcessedSuccessfully(context);
    return context.dfsResult();
  } catch (Throwable e) {
    logger.trace("Dfs phase failed", e);
    processFailure(context, e);
    throw ExceptionsHelper.convertToRuntime(e);
  } finally {
    cleanContext(context);
  }
}
/**
 * @param type the ec2 hostname type to discover.
 * @param warnOnFailure whether a failure to obtain the meta-data is logged at warn or debug level.
 * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained.
 * @see CustomNameResolver#resolveIfPossible(String)
 */
public InetAddress resolve(Ec2HostnameType type, boolean warnOnFailure) {
  URLConnection urlConnection = null;
  InputStream in = null;
  try {
    URL url = new URL(AwsEc2Service.EC2_METADATA_URL + type.ec2Name);
    logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url);
    urlConnection = url.openConnection();
    urlConnection.setConnectTimeout(2000);
    in = urlConnection.getInputStream();
    BufferedReader urlReader = new BufferedReader(new InputStreamReader(in));
    String metadataResult = urlReader.readLine();
    if (metadataResult == null || metadataResult.length() == 0) {
      logger.error("no ec2 metadata returned from {}", url);
      return null;
    }
    return InetAddress.getByName(metadataResult);
  } catch (IOException e) {
    if (warnOnFailure) {
      logger.warn(
          "failed to get metadata for ["
              + type.configName
              + "]: "
              + ExceptionsHelper.detailedMessage(e));
    } else {
      logger.debug(
          "failed to get metadata for ["
              + type.configName
              + "]: "
              + ExceptionsHelper.detailedMessage(e));
    }
    return null;
  } finally {
    IOUtils.closeWhileHandlingException(in);
  }
}
public synchronized void clearField(final String fieldName) {
  if (ParentFieldMapper.NAME.equals(fieldName)) {
    parentIndexFieldData = null;
  }
  List<Throwable> exceptions = new ArrayList<>(0);
  final IndexFieldDataCache cache = fieldDataCaches.remove(fieldName);
  if (cache != null) {
    try {
      cache.clear();
    } catch (Throwable t) {
      exceptions.add(t);
    }
  }
  ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions);
}
synchronized void cancel(IndexShard shard, String reason) {
  final ShardRecoveryContext shardRecoveryContext = ongoingRecoveries.get(shard);
  if (shardRecoveryContext != null) {
    final List<Exception> failures = new ArrayList<>();
    for (RecoverySourceHandler handler : shardRecoveryContext.recoveryHandlers) {
      try {
        handler.cancel(reason);
      } catch (Exception ex) {
        failures.add(ex);
      } finally {
        shard.recoveryStats().decCurrentAsSource();
      }
    }
    ExceptionsHelper.maybeThrowRuntimeAndSuppress(failures);
  }
}
private void mapSegmentCountsToGlobalCounts() {
  // There is no public method in Ordinals.Docs that allows for this mapping...
  // This is the cleanest way I can think of so far
  GlobalOrdinalMapping mapping = (GlobalOrdinalMapping) globalOrdinals;
  for (int i = 0; i < segmentDocCounts.size(); i++) {
    final long inc = segmentDocCounts.set(i, 0);
    if (inc == 0) {
      continue;
    }
    final long globalOrd = mapping.getGlobalOrd(i);
    try {
      incrementBucketDocCount(inc, globalOrd);
    } catch (IOException e) {
      throw ExceptionsHelper.convertToElastic(e);
    }
  }
}
private Query queryFromInnerFunction(Function function, Context context) {
  for (Symbol symbol : function.arguments()) {
    if (symbol.symbolType() == SymbolType.FUNCTION) {
      String functionName = ((Function) symbol).info().ident().name();
      InnerFunctionToQuery functionToQuery = innerFunctions.get(functionName);
      if (functionToQuery != null) {
        try {
          Query query = functionToQuery.apply(function, (Function) symbol, context);
          if (query != null) {
            return query;
          }
        } catch (IOException e) {
          throw ExceptionsHelper.convertToRuntime(e);
        }
      }
    }
  }
  return null;
}
@Override
public void onFailure(Exception e) {
  if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
    if (retries.hasNext()) {
      retryCount += 1;
      TimeValue delay = retries.next();
      logger.trace("retrying rejected search after [{}]", e, delay);
      countSearchRetry.run();
      threadPool.schedule(delay, ThreadPool.Names.SAME, this);
    } else {
      logger.warn(
          "giving up on search because we retried [{}] times without success", e, retryCount);
      fail.accept(e);
    }
  } else {
    logger.warn("giving up on search because it failed with a non-retryable exception", e);
    fail.accept(e);
  }
}
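// The retry decision above hinges on ExceptionsHelper.unwrap(e, type) finding
// an EsRejectedExecutionException anywhere in the cause chain, even when it is
// buried under transport-layer wrappers. A self-contained sketch of such a
// type-directed scan (stand-in code, assuming the helper walks getCause() with
// cycle protection via an identity set):
final class CauseScan {
  private CauseScan() {}

  // Return the first throwable in the cause chain assignable to `type`,
  // or null if none is found.
  static Throwable unwrap(Throwable t, Class<? extends Throwable> type) {
    java.util.Set<Throwable> seen =
        java.util.Collections.newSetFromMap(new java.util.IdentityHashMap<>());
    while (t != null && seen.add(t)) { // seen.add(...) returns false on a cycle
      if (type.isInstance(t)) {
        return t;
      }
      t = t.getCause();
    }
    return null;
  }
}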
@Override
protected ShardResponse processRequestItems(
    ShardId shardId, ShardUpsertRequest request, AtomicBoolean killed) {
  ShardResponse shardResponse = new ShardResponse();
  DocTableInfo tableInfo = schemas.getWritableTable(TableIdent.fromIndexName(request.index()));
  for (int i = 0; i < request.itemIndices().size(); i++) {
    int location = request.itemIndices().get(i);
    ShardUpsertRequest.Item item = request.items().get(i);
    if (killed.get()) {
      throw new CancellationException();
    }
    try {
      indexItem(
          tableInfo,
          request,
          item,
          shardId,
          item.insertValues() != null, // try insert first
          0);
      shardResponse.add(location);
    } catch (Throwable t) {
      if (!TransportActions.isShardNotAvailableException(t) && !request.continueOnError()) {
        throw t;
      } else {
        logger.debug(
            "{} failed to execute upsert for [{}]/[{}]",
            t,
            request.shardId(),
            request.type(),
            item.id());
        shardResponse.add(
            location,
            new ShardResponse.Failure(
                item.id(),
                ExceptionsHelper.detailedMessage(t),
                (t instanceof VersionConflictEngineException)));
      }
    }
  }
  return shardResponse;
}