private void doFinish() {
      if (finished.compareAndSet(false, true)) {
        Releasables.close(indexShardReference);
        final ShardId shardId = shardIt.shardId();
        final ActionWriteResponse.ShardInfo.Failure[] failuresArray;
        if (!shardReplicaFailures.isEmpty()) {
          int slot = 0;
          failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()];
          for (Map.Entry<String, Throwable> entry : shardReplicaFailures.entrySet()) {
            RestStatus restStatus = ExceptionsHelper.status(entry.getValue());
            failuresArray[slot++] =
                new ActionWriteResponse.ShardInfo.Failure(
                    shardId.getIndex(),
                    shardId.getId(),
                    entry.getKey(),
                    entry.getValue(),
                    restStatus,
                    false);
          }
        } else {
          failuresArray = ActionWriteResponse.EMPTY;
        }
        finalResponse.setShardInfo(
            new ActionWriteResponse.ShardInfo(totalShards, success.get(), failuresArray));

        listener.onResponse(finalResponse);
      }
    }
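The doFinish() above shows the finish-once idiom: an AtomicBoolean CAS guards resource release and listener notification so they happen exactly once even under races with the failure paths further down. A minimal JDK-only sketch of the pattern (FinishOnce and its fields are hypothetical, not Elasticsearch API):

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical sketch: the completion callback runs exactly once, no matter
// how many threads race into doFinish().
class FinishOnce {
  private final AtomicBoolean finished = new AtomicBoolean(false);
  private final AutoCloseable resource;   // stands in for indexShardReference
  private final Runnable onResponse;      // stands in for listener.onResponse(...)

  FinishOnce(AutoCloseable resource, Runnable onResponse) {
    this.resource = resource;
    this.onResponse = onResponse;
  }

  void doFinish() throws Exception {
    // compareAndSet succeeds for exactly one caller; all others return quietly
    if (finished.compareAndSet(false, true)) {
      resource.close();   // release before notifying, as Releasables.close(...) above
      onResponse.run();
    }
  }
}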
 @Override
 protected final void doClose() {
   if (recycler != null) {
     Releasables.close(cache);
     cache = null;
   }
 }
 @Override
 public void doClose() throws ElasticsearchException {
   if (scanContext != null) {
     scanContext.clear();
   }
    // release the searcher and engine searcher we still hold
   Releasables.close(searcher, engineSearcher);
 }
 void finishWithUnexpectedFailure(Throwable failure) {
   logger.warn("unexpected error during the primary phase for action [{}]", failure, actionName);
   if (finished.compareAndSet(false, true)) {
     Releasables.close(indexShardReference);
     listener.onFailure(failure);
   } else {
     assert false : "finishWithUnexpectedFailure called but operation is already finished";
   }
 }
 void finishAsFailed(Throwable failure) {
   if (finished.compareAndSet(false, true)) {
     Releasables.close(indexShardReference);
     logger.trace("operation failed", failure);
     listener.onFailure(failure);
   } else {
     assert false : "finishAsFailed called but operation is already finished";
   }
 }
Example #6
 public void clearReleasables(Lifetime lifetime) {
   if (clearables != null) {
     List<List<Releasable>> releasables = new ArrayList<>();
     for (Lifetime lc : Lifetime.values()) {
       if (lc.compareTo(lifetime) > 0) {
         break;
       }
       List<Releasable> remove = clearables.remove(lc);
       if (remove != null) {
         releasables.add(remove);
       }
     }
     Releasables.close(Iterables.flatten(releasables));
   }
 }
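clearReleasables walks the Lifetime enum in declaration order and stops at the first scope wider than the one requested, so releasing a lifetime also releases everything narrower. A hedged JDK-only sketch of that ordering trick (the Lifetime values here are illustrative):

import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

class ScopedReleasables {
  // Illustrative scopes, narrowest first; enum order doubles as the hierarchy.
  enum Lifetime { COLLECTION, PHASE, CONTEXT }

  private final Map<Lifetime, List<AutoCloseable>> clearables = new EnumMap<>(Lifetime.class);

  void add(Lifetime lifetime, AutoCloseable releasable) {
    clearables.computeIfAbsent(lifetime, k -> new ArrayList<>()).add(releasable);
  }

  // Releases every resource registered at or below the given lifetime.
  void clear(Lifetime lifetime) throws Exception {
    for (Lifetime lc : Lifetime.values()) {
      if (lc.compareTo(lifetime) > 0) {
        break;   // everything from here on outlives the requested scope
      }
      List<AutoCloseable> removed = clearables.remove(lc);
      if (removed != null) {
        for (AutoCloseable r : removed) {
          r.close();
        }
      }
    }
  }
}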
 @Override
 public boolean release() throws ElasticsearchException {
   Releasables.release(parentDocs);
   return true;
 }
  int resolveParentDocuments(
      TopDocs topDocs,
      SearchContext context,
      Recycler.V<ObjectObjectOpenHashMap<Object, ParentDoc[]>> parentDocs) {
    int parentHitsResolved = 0;
    Recycler.V<ObjectObjectOpenHashMap<Object, Recycler.V<IntObjectOpenHashMap<ParentDoc>>>>
        parentDocsPerReader =
            cacheRecycler.hashMap(context.searcher().getIndexReader().leaves().size());
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
      int readerIndex =
          ReaderUtil.subIndex(scoreDoc.doc, context.searcher().getIndexReader().leaves());
      AtomicReaderContext subContext =
          context.searcher().getIndexReader().leaves().get(readerIndex);
      int subDoc = scoreDoc.doc - subContext.docBase;

      // find the parent id
      HashedBytesArray parentId =
          context.idCache().reader(subContext.reader()).parentIdByDoc(parentType, subDoc);
      if (parentId == null) {
        // no parent found
        continue;
      }
      // now go over the readers and find the matching parent doc id / reader tuple
      for (AtomicReaderContext atomicReaderContext : context.searcher().getIndexReader().leaves()) {
        AtomicReader indexReader = atomicReaderContext.reader();
        int parentDocId = context.idCache().reader(indexReader).docById(parentType, parentId);
        Bits liveDocs = indexReader.getLiveDocs();
        if (parentDocId != -1 && (liveDocs == null || liveDocs.get(parentDocId))) {
          // we found a live parent in this reader, record the hit

          Recycler.V<IntObjectOpenHashMap<ParentDoc>> readerParentDocs =
              parentDocsPerReader.v().get(indexReader.getCoreCacheKey());
          if (readerParentDocs == null) {
            readerParentDocs = cacheRecycler.intObjectMap(indexReader.maxDoc());
            parentDocsPerReader.v().put(indexReader.getCoreCacheKey(), readerParentDocs);
          }

          ParentDoc parentDoc = readerParentDocs.v().get(parentDocId);
          if (parentDoc == null) {
            parentHitsResolved++; // we have a hit on a parent
            parentDoc = new ParentDoc();
            parentDoc.docId = parentDocId;
            parentDoc.count = 1;
            parentDoc.maxScore = scoreDoc.score;
            parentDoc.sumScores = scoreDoc.score;
            readerParentDocs.v().put(parentDocId, parentDoc);
          } else {
            parentDoc.count++;
            parentDoc.sumScores += scoreDoc.score;
            if (scoreDoc.score > parentDoc.maxScore) {
              parentDoc.maxScore = scoreDoc.score;
            }
          }
        }
      }
    }
    boolean[] states = parentDocsPerReader.v().allocated;
    Object[] keys = parentDocsPerReader.v().keys;
    Object[] values = parentDocsPerReader.v().values;
    for (int i = 0; i < states.length; i++) {
      if (states[i]) {
        Recycler.V<IntObjectOpenHashMap<ParentDoc>> value =
            (Recycler.V<IntObjectOpenHashMap<ParentDoc>>) values[i];
        ParentDoc[] _parentDocs = value.v().values().toArray(ParentDoc.class);
        Arrays.sort(_parentDocs, PARENT_DOC_COMP);
        parentDocs.v().put(keys[i], _parentDocs);
        Releasables.release(value);
      }
    }
    Releasables.release(parentDocsPerReader);
    return parentHitsResolved;
  }
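resolveParentDocuments leans on the obtain/use/release contract of Recycler.V: unwrap the pooled value with v(), and hand the wrapper back via Releasables.release once done. A minimal hedged stand-in for that contract (MapPool and Lease are hypothetical names, not Elasticsearch API):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;

// Hypothetical stand-in for a Recycler: leases pooled maps, takes them back on close.
class MapPool {
  private final Deque<HashMap<Object, Object>> free = new ArrayDeque<>();

  final class Lease implements AutoCloseable {
    private final HashMap<Object, Object> value =
        free.isEmpty() ? new HashMap<>() : free.pop();

    HashMap<Object, Object> v() { return value; }   // mirrors Recycler.V#v()

    @Override
    public void close() {                           // mirrors Releasables.release(...)
      value.clear();                                // scrub before returning to the pool
      free.push(value);
    }
  }

  Lease obtain() { return new Lease(); }
}

Note that the method above releases parentDocsPerReader only on the normal exit path; wrapping the loop in try/finally would repay the pool even when a reader lookup throws.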
 @Override
 public void close() throws ElasticsearchException {
   Releasables.close(visitedOrds);
 }
 @Override
 protected void doClose() {
   Releasables.close(counts, collector);
 }
 @Override
 protected void doClose() {
   Releasables.close(segmentDocCounts);
 }
 @Override
 public void doClose() {
   scanContext = null;
    // release the searcher and engine searcher we still hold
   Releasables.close(searcher, engineSearcher);
 }
 @AfterClass
 public static void after() throws IOException {
   SearchContext current = SearchContext.current();
   SearchContext.removeCurrent();
   Releasables.close(current);
 }
 @Override
 public void doClose() {
   Releasables.close(bucketOrds);
 }
Example #15
 public void applyStartedShards(StartedRerouteAllocation allocation) {
   for (ShardRouting shard : allocation.startedShards()) {
     Releasables.close(asyncFetchStarted.remove(shard.shardId()));
     Releasables.close(asyncFetchStore.remove(shard.shardId()));
   }
 }
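remove() returns null when no fetch is in flight for the shard, so the loop relies on Releasables.close tolerating null arguments. A hedged sketch of that null-tolerant contract:

// Hypothetical equivalent of the null-tolerance assumed above: closing a
// missing resource is a no-op rather than a NullPointerException.
static void closeIfNotNull(AutoCloseable releasable) throws Exception {
  if (releasable != null) {
    releasable.close();
  }
}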
Example #16
  // During concurrent close() calls we want to make sure that all of them return after the
  // node has completed its shutdown cycle. If not, the hook added in Bootstrap#setup() would
  // be useless: close() might not be executed if another call (for example an API call) to
  // close() has already set some lifecycles to stopped. In that case the process would be
  // terminated even though the first call to close() has not finished yet.
  public synchronized void close() {
    if (lifecycle.started()) {
      stop();
    }
    if (!lifecycle.moveToClosed()) {
      return;
    }

    ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
    logger.info("closing ...");

    StopWatch stopWatch = new StopWatch("node_close");
    stopWatch.start("tribe");
    injector.getInstance(TribeService.class).close();
    stopWatch.stop().start("http");
    if (settings.getAsBoolean("http.enabled", true)) {
      injector.getInstance(HttpServer.class).close();
    }

    stopWatch.stop().start("rivers");
    injector.getInstance(RiversManager.class).close();

    stopWatch.stop().start("snapshot_service");
    injector.getInstance(SnapshotsService.class).close();
    stopWatch.stop().start("client");
    Releasables.close(injector.getInstance(Client.class));
    stopWatch.stop().start("indices_cluster");
    injector.getInstance(IndicesClusterStateService.class).close();
    stopWatch.stop().start("indices");
    injector.getInstance(IndicesFilterCache.class).close();
    injector.getInstance(IndicesFieldDataCache.class).close();
    injector.getInstance(IndexingMemoryController.class).close();
    injector.getInstance(IndicesTTLService.class).close();
    injector.getInstance(IndicesService.class).close();
    stopWatch.stop().start("routing");
    injector.getInstance(RoutingService.class).close();
    stopWatch.stop().start("cluster");
    injector.getInstance(ClusterService.class).close();
    stopWatch.stop().start("discovery");
    injector.getInstance(DiscoveryService.class).close();
    stopWatch.stop().start("monitor");
    injector.getInstance(MonitorService.class).close();
    stopWatch.stop().start("gateway");
    injector.getInstance(GatewayService.class).close();
    stopWatch.stop().start("search");
    injector.getInstance(SearchService.class).close();
    stopWatch.stop().start("rest");
    injector.getInstance(RestController.class).close();
    stopWatch.stop().start("transport");
    injector.getInstance(TransportService.class).close();
    stopWatch.stop().start("percolator_service");
    injector.getInstance(PercolatorService.class).close();

    for (Class<? extends LifecycleComponent> plugin : pluginsService.services()) {
      stopWatch.stop().start("plugin(" + plugin.getName() + ")");
      injector.getInstance(plugin).close();
    }

    stopWatch.stop().start("script");
    injector.getInstance(ScriptService.class).close();

    stopWatch.stop().start("thread_pool");
    // TODO this should really use ThreadPool.terminate()
    injector.getInstance(ThreadPool.class).shutdown();
    try {
      injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      // ignore
    }
    stopWatch.stop().start("thread_pool_force_shutdown");
    try {
      injector.getInstance(ThreadPool.class).shutdownNow();
    } catch (Exception e) {
      // ignore
    }
    stopWatch.stop();

    if (logger.isTraceEnabled()) {
      logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
    }

    injector.getInstance(NodeEnvironment.class).close();
    injector.getInstance(PageCacheRecycler.class).close();
    Injectors.close(injector);

    CachedStreams.clear();

    logger.info("closed");
  }
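close() brackets every shutdown phase with stopWatch.stop().start("name") so slow services show up in the trace log at the end. A minimal JDK-only sketch of that phase-timing idea (PhaseTimer is hypothetical, not the Elasticsearch StopWatch):

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical phase timer mirroring the stop().start("name") chain above.
class PhaseTimer {
  private final Map<String, Long> phases = new LinkedHashMap<>();
  private String current;
  private long startedAt;

  void start(String phase) {
    current = phase;
    startedAt = System.nanoTime();
  }

  void stop() {
    if (current != null) {
      phases.put(current, System.nanoTime() - startedAt);
      current = null;
    }
  }

  String prettyPrint() {
    StringBuilder sb = new StringBuilder();
    phases.forEach((name, nanos) ->
        sb.append(name).append(": ").append(nanos / 1_000_000).append("ms\n"));
    return sb.toString();
  }
}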
Example #17
 @Override
 public boolean release() throws ElasticsearchException {
   Releasables.release(uidToScore, uidToCount);
   return true;
 }
 /** Perform the operation on the node holding the primary. */
 void performOnPrimary(final ShardRouting primary, final ShardIterator shardsIt) {
   final String writeConsistencyFailure = checkWriteConsistency(primary);
   if (writeConsistencyFailure != null) {
     retryBecauseUnavailable(primary.shardId(), writeConsistencyFailure);
     return;
   }
   final ReplicationPhase replicationPhase;
   try {
     indexShardReference = getIndexShardOperationsCounter(primary.shardId());
     PrimaryOperationRequest por =
         new PrimaryOperationRequest(
             primary.id(), internalRequest.concreteIndex(), internalRequest.request());
     Tuple<Response, ReplicaRequest> primaryResponse =
         shardOperationOnPrimary(observer.observedState(), por);
     logger.trace("operation completed on primary [{}]", primary);
     replicationPhase =
         new ReplicationPhase(
             shardsIt,
             primaryResponse.v2(),
             primaryResponse.v1(),
             observer,
             primary,
             internalRequest,
             listener,
             indexShardReference,
             shardFailedTimeout);
   } catch (Throwable e) {
     // shard has not been allocated yet, retry it here
     if (retryPrimaryException(e)) {
       logger.trace(
           "had an error while performing operation on primary ({}), scheduling a retry.",
           e.getMessage());
        // We have to close here because when we retry we will get a new reference on the
        // index shard and we do not want to increment the counter twice.
       Releasables.close(indexShardReference);
        // We have to reset to null here because when we retry we might never get to the
        // point where we assign a new reference (for example, if the operation was rejected
        // because the queue is full). In that case we would release again once one of the
        // finish methods is called.
       indexShardReference = null;
       retry(e);
       return;
     }
     if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
       if (logger.isTraceEnabled()) {
         logger.trace(
             primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]",
             e);
       }
     } else {
       if (logger.isDebugEnabled()) {
         logger.debug(
             primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]",
             e);
       }
     }
     finishAsFailed(e);
     return;
   }
   finishAndMoveToReplication(replicationPhase);
 }
 private void forceFinishAsFailed(Throwable t) {
   if (finished.compareAndSet(false, true)) {
     Releasables.close(indexShardReference);
     listener.onFailure(t);
   }
 }
Example #20
  @Override
  public Weight createWeight(IndexSearcher searcher) throws IOException {
    SearchContext searchContext = SearchContext.current();
    assert rewrittenChildQuery != null;
    assert rewriteIndexReader == searcher.getIndexReader()
        : "not equal, rewriteIndexReader="
            + rewriteIndexReader
            + " searcher.getIndexReader()="
            + searcher.getIndexReader();
    final Query childQuery = rewrittenChildQuery;

    IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader());
    indexSearcher.setSimilarity(searcher.getSimilarity());

    final BytesRefHash parentIds;
    final FloatArray scores;
    final IntArray occurrences;
    switch (scoreType) {
      case MAX:
        MaxCollector maxCollector =
            new MaxCollector(parentChildIndexFieldData, parentType, searchContext);
        try {
          indexSearcher.search(childQuery, maxCollector);
          parentIds = maxCollector.parentIds;
          scores = maxCollector.scores;
          occurrences = null;
        } finally {
          Releasables.release(maxCollector.parentIdsIndex);
        }
        break;
      case SUM:
        SumCollector sumCollector =
            new SumCollector(parentChildIndexFieldData, parentType, searchContext);
        try {
          indexSearcher.search(childQuery, sumCollector);
          parentIds = sumCollector.parentIds;
          scores = sumCollector.scores;
          occurrences = null;
        } finally {
          Releasables.release(sumCollector.parentIdsIndex);
        }
        break;
      case AVG:
        AvgCollector avgCollector =
            new AvgCollector(parentChildIndexFieldData, parentType, searchContext);
        try {
          indexSearcher.search(childQuery, avgCollector);
          parentIds = avgCollector.parentIds;
          scores = avgCollector.scores;
          occurrences = avgCollector.occurrences;
        } finally {
          Releasables.release(avgCollector.parentIdsIndex);
        }
        break;
      default:
        throw new RuntimeException("Are we missing a score type here? -- " + scoreType);
    }

    int size = (int) parentIds.size();
    if (size == 0) {
      Releasables.release(parentIds, scores, occurrences);
      return Queries.newMatchNoDocsQuery().createWeight(searcher);
    }

    final Filter parentFilter;
    if (size == 1) {
      BytesRef id = parentIds.get(0, new BytesRef());
      if (nonNestedDocsFilter != null) {
        List<Filter> filters =
            Arrays.asList(
                new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))),
                nonNestedDocsFilter);
        parentFilter = new AndFilter(filters);
      } else {
        parentFilter =
            new TermFilter(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)));
      }
    } else if (size <= shortCircuitParentDocSet) {
      parentFilter = new ParentIdsFilter(parentType, nonNestedDocsFilter, parentIds);
    } else {
      parentFilter = new ApplyAcceptedDocsFilter(this.parentFilter);
    }
    ParentWeight parentWeight =
        new ParentWeight(
            rewrittenChildQuery.createWeight(searcher),
            parentFilter,
            size,
            parentIds,
            scores,
            occurrences);
    searchContext.addReleasable(parentWeight);
    return parentWeight;
  }
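Each collector branch above releases its scratch parentIdsIndex in a finally block, so the pooled state is repaid even when the nested search throws; only the result structures escape. A small hedged sketch of that acquire-in-try, release-in-finally shape (the helper names are hypothetical):

// Hypothetical sketch of the release discipline used per collector above:
// scratch state is always returned, results escape only on success.
static long searchWithScratch() throws Exception {
  long[] scratch = borrowScratch();   // stands in for collector.parentIdsIndex
  try {
    return runSearch(scratch);        // stands in for indexSearcher.search(childQuery, collector)
  } finally {
    returnScratch(scratch);           // stands in for Releasables.release(...)
  }
}

static long[] borrowScratch() { return new long[16]; }
static long runSearch(long[] scratch) { return scratch.length; }
static void returnScratch(long[] scratch) { /* hand back to the pool */ }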
Example #21
 @Override
 public boolean release() throws ElasticsearchException {
   Releasables.release(parentIds, scores, occurrences);
   return true;
 }
Example #22
 public void applyFailedShards(FailedRerouteAllocation allocation) {
   for (FailedRerouteAllocation.FailedShard shard : allocation.failedShards()) {
     Releasables.close(asyncFetchStarted.remove(shard.shard.shardId()));
     Releasables.close(asyncFetchStore.remove(shard.shard.shardId()));
   }
 }
Example #23
 @Override
 public boolean release() {
   Releasables.release(ids);
   return true;
 }
 @Override
 public void doRelease() {
   Releasables.release(bucketOrds);
 }
Example #25
  private void parseTemplate(ShardSearchRequest request) {

    BytesReference processedQuery;
    if (request.template() != null) {
      ExecutableScript executable =
          this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH);
      processedQuery = (BytesReference) executable.run();
    } else {
      if (!hasLength(request.templateSource())) {
        return;
      }
      XContentParser parser = null;
      Template template = null;

      try {
        parser =
            XContentFactory.xContent(request.templateSource())
                .createParser(request.templateSource());
        template = TemplateQueryParser.parse(parser, "params", "template");

        if (template.getType() == ScriptService.ScriptType.INLINE) {
          // Try to double parse for nested template id/file
          parser = null;
          try {
            ExecutableScript executable =
                this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
            processedQuery = (BytesReference) executable.run();
            parser = XContentFactory.xContent(processedQuery).createParser(processedQuery);
          } catch (ElasticsearchParseException epe) {
            // This was a non-nested template and the parse failure was due to that; for
            // backwards compatibility it is safe to assume the script refers to a file
            // and keep going
            template =
                new Template(
                    template.getScript(),
                    ScriptService.ScriptType.FILE,
                    MustacheScriptEngineService.NAME,
                    null,
                    template.getParams());
            ExecutableScript executable =
                this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
            processedQuery = (BytesReference) executable.run();
          }
          if (parser != null) {
            try {
              Template innerTemplate = TemplateQueryParser.parse(parser);
              if (hasLength(innerTemplate.getScript())
                  && !innerTemplate.getType().equals(ScriptService.ScriptType.INLINE)) {
                // An inner template referring to a filename or id
                template =
                    new Template(
                        innerTemplate.getScript(),
                        innerTemplate.getType(),
                        MustacheScriptEngineService.NAME,
                        null,
                        template.getParams());
                ExecutableScript executable =
                    this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
                processedQuery = (BytesReference) executable.run();
              }
            } catch (ScriptParseException e) {
              // No inner template found, use original template from above
            }
          }
        } else {
          ExecutableScript executable =
              this.scriptService.executable(template, ScriptContext.Standard.SEARCH);
          processedQuery = (BytesReference) executable.run();
        }
      } catch (IOException e) {
        throw new ElasticsearchParseException("Failed to parse template", e);
      } finally {
        Releasables.closeWhileHandlingException(parser);
      }

      if (!hasLength(template.getScript())) {
        throw new ElasticsearchParseException("Template must have [template] field configured");
      }
    }
    request.source(processedQuery);
  }
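The finally block uses closeWhileHandlingException so a failure while closing the parser cannot shadow a parse exception already in flight. A hedged JDK-only sketch of that helper's contract:

// Hypothetical equivalent of Releasables.closeWhileHandlingException: swallow
// close-time failures because we may already be unwinding another exception.
static void closeWhileHandlingException(AutoCloseable... resources) {
  for (AutoCloseable resource : resources) {
    if (resource == null) {
      continue;   // null-tolerant, like the real helper
    }
    try {
      resource.close();
    } catch (Exception e) {
      // intentionally swallowed: never mask the primary failure
    }
  }
}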
 @Override
 protected void doClose() {
   Releasables.close(bdd);
   super.doClose();
 }
 @Override
 public void doRelease() {
   Releasables.release(mins);
 }
Example #28
 @Override
 protected void doClose() {
   Releasables.close(engineSearcher, docSearcher);
 }
 @Override
 protected void doClose() {
   Releasables.close(bucketOrds);
 }