private void doFinish() {
    if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        final ShardId shardId = shardIt.shardId();
        final ActionWriteResponse.ShardInfo.Failure[] failuresArray;
        if (!shardReplicaFailures.isEmpty()) {
            int slot = 0;
            failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()];
            for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
                RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
                failuresArray[slot++] = new ActionWriteResponse.ShardInfo.Failure(
                        shardId.getIndex(), shardId.getId(), entry.getKey(), entry.getValue(), restStatus, false);
            }
        } else {
            failuresArray = ActionWriteResponse.EMPTY;
        }
        finalResponse.setShardInfo(new ActionWriteResponse.ShardInfo(totalShards, success.get(), failuresArray));
        listener.onResponse(finalResponse);
    }
}
@Override
protected final void doClose() {
    if (recycler != null) {
        Releasables.close(cache);
        cache = null;
    }
}
@Override
public void doClose() throws ElasticsearchException {
    if (scanContext != null) {
        scanContext.clear();
    }
    // clear and scope phase we have
    Releasables.close(searcher, engineSearcher);
}
void finishWithUnexpectedFailure(Throwable failure) {
    logger.warn("unexpected error during the primary phase for action [{}]", failure, actionName);
    if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        listener.onFailure(failure);
    } else {
        assert false : "finishWithUnexpectedFailure called but operation is already finished";
    }
}
void finishAsFailed(Throwable failure) {
    if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        logger.trace("operation failed", failure);
        listener.onFailure(failure);
    } else {
        assert false : "finishAsFailed called but operation is already finished";
    }
}
public void clearReleasables(Lifetime lifetime) {
    if (clearables != null) {
        List<List<Releasable>> releasables = new ArrayList<>();
        for (Lifetime lc : Lifetime.values()) {
            if (lc.compareTo(lifetime) > 0) {
                break;
            }
            List<Releasable> remove = clearables.remove(lc);
            if (remove != null) {
                releasables.add(remove);
            }
        }
        Releasables.close(Iterables.flatten(releasables));
    }
}
@Override
public void doClose() {
    Releasables.close(bucketOrds);
}
public void applyFailedShards(FailedRerouteAllocation allocation) {
    for (FailedRerouteAllocation.FailedShard shard : allocation.failedShards()) {
        Releasables.close(asyncFetchStarted.remove(shard.shard.shardId()));
        Releasables.close(asyncFetchStore.remove(shard.shard.shardId()));
    }
}
public void applyStartedShards(StartedRerouteAllocation allocation) {
    for (ShardRouting shard : allocation.startedShards()) {
        Releasables.close(asyncFetchStarted.remove(shard.shardId()));
        Releasables.close(asyncFetchStore.remove(shard.shardId()));
    }
}
// During concurrent close() calls we want to make sure that all of them return only after the node
// has completed its shutdown cycle.
// If not, the hook that is added in Bootstrap#setup() will be useless: close() might not be
// executed, in case another call to close() (for example, an API-triggered one) has already set
// some lifecycles to stopped. In that case the process would be terminated even though the first
// call to close() has not finished yet.
public synchronized void close() {
    if (lifecycle.started()) {
        stop();
    }
    if (!lifecycle.moveToClosed()) {
        return;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("closing ...");

    StopWatch stopWatch = new StopWatch("node_close");
    stopWatch.start("tribe");
    injector.getInstance(TribeService.class).close();
    stopWatch.stop().start("http");
    if (settings.getAsBoolean("http.enabled", true)) {
        injector.getInstance(HttpServer.class).close();
    }
    stopWatch.stop().start("rivers");
    injector.getInstance(RiversManager.class).close();
    stopWatch.stop().start("snapshot_service");
    injector.getInstance(SnapshotsService.class).close();
    stopWatch.stop().start("client");
    Releasables.close(injector.getInstance(Client.class));
    stopWatch.stop().start("indices_cluster");
    injector.getInstance(IndicesClusterStateService.class).close();
    stopWatch.stop().start("indices");
    injector.getInstance(IndicesFilterCache.class).close();
    injector.getInstance(IndicesFieldDataCache.class).close();
    injector.getInstance(IndexingMemoryController.class).close();
    injector.getInstance(IndicesTTLService.class).close();
    injector.getInstance(IndicesService.class).close();
    stopWatch.stop().start("routing");
    injector.getInstance(RoutingService.class).close();
    stopWatch.stop().start("cluster");
    injector.getInstance(ClusterService.class).close();
    stopWatch.stop().start("discovery");
    injector.getInstance(DiscoveryService.class).close();
    stopWatch.stop().start("monitor");
    injector.getInstance(MonitorService.class).close();
    stopWatch.stop().start("gateway");
    injector.getInstance(GatewayService.class).close();
    stopWatch.stop().start("search");
    injector.getInstance(SearchService.class).close();
    stopWatch.stop().start("rest");
    injector.getInstance(RestController.class).close();
    stopWatch.stop().start("transport");
    injector.getInstance(TransportService.class).close();
    stopWatch.stop().start("percolator_service");
    injector.getInstance(PercolatorService.class).close();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
        stopWatch.stop().start("plugin(" + plugin.getName() + ")");
        injector.getInstance(plugin).close();
    }

    stopWatch.stop().start("script");
    injector.getInstance(ScriptService.class).close();

    stopWatch.stop().start("thread_pool");
    // TODO this should really use ThreadPool.terminate()
    injector.getInstance(ThreadPool.class).shutdown();
    try {
        injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // ignore
    }
    stopWatch.stop().start("thread_pool_force_shutdown");
    try {
        injector.getInstance(ThreadPool.class).shutdownNow();
    } catch (Exception e) {
        // ignore
    }
    stopWatch.stop();

    if (logger.isTraceEnabled()) {
        logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }

    injector.getInstance(NodeEnvironment.class).close();
    injector.getInstance(PageCacheRecycler.class).close();
    Injectors.close(injector);
    CachedStreams.clear();

    logger.info("closed");
}
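// The comment above describes a pattern that several of these snippets share: close() must be safe
// to call concurrently, must run the teardown at most once, and every caller should return only
// after shutdown has actually completed. A minimal, self-contained sketch of that idea using only
// JDK types follows; the Service class, closed flag, and releaseResources() are hypothetical
// illustrations, not Elasticsearch APIs.
import java.util.concurrent.atomic.AtomicBoolean;

class Service implements AutoCloseable {
    private final AtomicBoolean closed = new AtomicBoolean(false);

    @Override
    public synchronized void close() {
        // synchronized: a second concurrent caller blocks here until the first close() returns,
        // so nobody proceeds (e.g. terminates the process) before shutdown has completed.
        if (!closed.compareAndSet(false, true)) {
            return; // teardown already ran; make repeated close() calls a no-op
        }
        releaseResources(); // runs exactly once
    }

    private void releaseResources() {
        // placeholder for the per-service close() calls seen in Node#close() above
    }
}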
@Override
protected void doClose() {
    Releasables.close(segmentDocCounts);
}
@Override
public void doClose() {
    scanContext = null;
    // clear and scope phase we have
    Releasables.close(searcher, engineSearcher);
}
/** perform the operation on the node holding the primary */
void performOnPrimary(final ShardRouting primary, final ShardIterator shardsIt) {
    final String writeConsistencyFailure = checkWriteConsistency(primary);
    if (writeConsistencyFailure != null) {
        retryBecauseUnavailable(primary.shardId(), writeConsistencyFailure);
        return;
    }
    final ReplicationPhase replicationPhase;
    try {
        indexShardReference = getIndexShardOperationsCounter(primary.shardId());
        PrimaryOperationRequest por = new PrimaryOperationRequest(
                primary.id(), internalRequest.concreteIndex(), internalRequest.request());
        Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(observer.observedState(), por);
        logger.trace("operation completed on primary [{}]", primary);
        replicationPhase = new ReplicationPhase(
                shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary,
                internalRequest, listener, indexShardReference, shardFailedTimeout);
    } catch (Throwable e) {
        // shard has not been allocated yet, retry it here
        if (retryPrimaryException(e)) {
            logger.trace("had an error while performing operation on primary ({}), scheduling a retry.", e.getMessage());
            // We have to close here because when we retry we will acquire a new reference on the
            // index shard and we do not want to increment twice.
            Releasables.close(indexShardReference);
            // We have to reset to null here because when we retry it might be that we never get to
            // the point where we assign a new reference (for example, in case the operation was
            // rejected because the queue is full). In that case we would otherwise release again
            // once one of the finish methods is called.
            indexShardReference = null;
            retry(e);
            return;
        }
        if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
            if (logger.isTraceEnabled()) {
                logger.trace(primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e);
            }
        } else {
            if (logger.isDebugEnabled()) {
                logger.debug(primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e);
            }
        }
        finishAsFailed(e);
        return;
    }
    finishAndMoveToReplication(replicationPhase);
}
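// The comments in the catch block above reason about a reference count on the index shard: the
// primary phase acquires a reference (an "operations counter"), and Releasables.close(...) must
// release it exactly once, which is why the field is nulled out before retrying. A hedged,
// self-contained sketch of such a counter follows; OperationsCounter, acquire(), and its use of
// AutoCloseable are hypothetical stand-ins, not the Elasticsearch implementation (which uses its
// own Releasable type).
import java.util.concurrent.atomic.AtomicInteger;

class OperationsCounter implements AutoCloseable {
    private final AtomicInteger refCount = new AtomicInteger(1); // 1 = the owner's reference

    // Each in-flight operation takes its own reference.
    OperationsCounter acquire() {
        refCount.incrementAndGet();
        return this;
    }

    @Override
    public void close() {
        // Releasing the same reference twice (e.g. once on retry and again from a finish method)
        // would drop the count below what was acquired; the caller avoids that by nulling its
        // field after closing, exactly as performOnPrimary() does above.
        refCount.decrementAndGet();
    }
}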
private void forceFinishAsFailed(Throwable t) {
    if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        listener.onFailure(t);
    }
}
@AfterClass
public static void after() throws IOException {
    SearchContext current = SearchContext.current();
    SearchContext.removeCurrent();
    Releasables.close(current);
}
@Override
protected void doClose() {
    Releasables.close(counts, collector);
}
@Override
protected void doClose() {
    Releasables.close(bdd);
    super.doClose();
}
@Override
public void close() throws ElasticsearchException {
    Releasables.close(visitedOrds);
}
@Override
protected void doClose() {
    Releasables.close(engineSearcher, docSearcher);
}
@Override
protected void doClose() {
    Releasables.close(bucketOrds);
}