Code Example #1
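  // Executes a delete on the primary shard: the resolved version is written back onto the
  // request so replicas replay the operation with the same version, and an optional
  // best-effort refresh (failures ignored) runs before the response is built.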
  @Override
  protected PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest> shardOperationOnPrimary(
      ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    ShardDeleteRequest request = shardRequest.request;
    IndexShard indexShard =
        indicesService
            .indexServiceSafe(shardRequest.request.index())
            .shardSafe(shardRequest.shardId);
    Engine.Delete delete =
        indexShard
            .prepareDelete(request.type(), request.id(), request.version())
            .origin(Engine.Operation.Origin.PRIMARY);
    indexShard.delete(delete);
    // update the version on the request so it will be used on the replicas
    request.version(delete.version());

    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
      } catch (Exception e) {
        // ignore
      }
    }

    ShardDeleteResponse response = new ShardDeleteResponse(delete.version(), delete.found());
    return new PrimaryResponse<ShardDeleteResponse, ShardDeleteRequest>(
        shardRequest.request, response, null);
  }
Code Example #2
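 // Loads the registered percolator queries from this shard: force a refresh so the searcher
 // sees every persisted query document, collect the queries with QueriesLoaderCollector, and
 // register each one with the in-memory map and the shard-level percolate service.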
 private void loadQueries(IndexShard shard) {
   try {
     shard.refresh(new Engine.Refresh("percolator_load_queries").force(true));
     // Maybe add a mode load? This isn't really a write. We need write b/c state=post_recovery
     Engine.Searcher searcher =
         shard.acquireSearcher("percolator_load_queries", IndexShard.Mode.WRITE);
     try {
       Query query =
           new XConstantScoreQuery(
               indexCache
                   .filter()
                   .cache(
                       new TermFilter(
                           new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))));
       QueriesLoaderCollector queryCollector =
           new QueriesLoaderCollector(
               PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
       searcher.searcher().search(query, queryCollector);
       Map<HashedBytesRef, Query> queries = queryCollector.queries();
       for (Map.Entry<HashedBytesRef, Query> entry : queries.entrySet()) {
         Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
         shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
       }
     } finally {
       searcher.release();
     }
   } catch (Exception e) {
     throw new PercolatorException(
         shardId.index(), "failed to load queries from percolator index", e);
   }
 }
Code Example #3
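  // Replays a delete on a replica shard using the version resolved on the primary; the
  // version type is hard coded to INTERNAL, and an optional best-effort refresh follows.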
  @Override
  protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
    ShardDeleteRequest request = shardRequest.request;
    IndexShard indexShard =
        indicesService
            .indexServiceSafe(shardRequest.request.index())
            .shardSafe(shardRequest.shardId);
    Engine.Delete delete =
        indexShard
            .prepareDelete(request.type(), request.id(), request.version())
            .origin(Engine.Operation.Origin.REPLICA);

    // IndexDeleteAction doesn't support version type at the moment. Hard coded for the
    // INTERNAL version.
    delete.versionType(VersionType.INTERNAL.versionTypeForReplicationAndRecovery());

    assert delete.versionType().validateVersion(delete.version());

    indexShard.delete(delete);

    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_delete").force(false));
      } catch (Exception e) {
        // ignore
      }
    }
  }
Code Example #4
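 // Shard-level handler for the refresh action: resolve the shard, issue the refresh
 // (optionally waiting for in-flight operations), and acknowledge with a ShardRefreshResponse.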
 @Override
 protected ShardRefreshResponse shardOperation(ShardRefreshRequest request)
     throws ElasticSearchException {
   IndexShard indexShard =
       indicesService.indexServiceSafe(request.index()).shardSafe(request.shardId());
   indexShard.refresh(new Engine.Refresh(request.waitForOperations()));
   return new ShardRefreshResponse(request.index(), request.shardId());
 }
Code Example #5
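  // Services a multi-get on a single shard. A refresh is issued up front only when the request
  // asks for it and realtime get is disabled; each item is then fetched independently, with
  // shard-not-available errors rethrown and any other failure recorded as a per-item Failure.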
  @Override
  protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(
          new Engine.Refresh("refresh_flag_mget").force(TransportGetAction.REFRESH_FORCE));
    }

    MultiGetShardResponse response = new MultiGetShardResponse();
    for (int i = 0; i < request.locations.size(); i++) {
      String type = request.types.get(i);
      String id = request.ids.get(i);
      String[] fields = request.fields.get(i);

      long version = request.versions.get(i);
      VersionType versionType = request.versionTypes.get(i);
      if (versionType == null) {
        versionType = VersionType.INTERNAL;
      }

      FetchSourceContext fetchSourceContext = request.fetchSourceContexts.get(i);
      try {
        GetResult getResult =
            indexShard
                .getService()
                .get(
                    type, id, fields, request.realtime(), version, versionType, fetchSourceContext);
        response.add(request.locations.get(i), new GetResponse(getResult));
      } catch (Throwable t) {
        if (TransportActions.isShardNotAvailableException(t)) {
          throw (ElasticSearchException) t;
        } else {
          logger.debug(
              "[{}][{}] failed to execute multi_get for [{}]/[{}]",
              t,
              request.index(),
              shardId,
              type,
              id);
          response.add(
              request.locations.get(i),
              new MultiGetResponse.Failure(
                  request.index(), type, id, ExceptionsHelper.detailedMessage(t)));
        }
      }
    }

    return response;
  }
Code Example #6
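 // Replays an index operation on a replica: the source is re-parsed with REPLICA origin and
 // either indexed or created to mirror what the primary did, followed by an optional
 // best-effort refresh.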
 @Override
 protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) {
   IndexShard indexShard =
       indicesService
           .indexServiceSafe(shardRequest.shardId.getIndex())
           .shardSafe(shardRequest.shardId.id());
   IndexRequest request = shardRequest.request;
   SourceToParse sourceToParse =
       SourceToParse.source(SourceToParse.Origin.REPLICA, request.source())
           .type(request.type())
           .id(request.id())
           .routing(request.routing())
           .parent(request.parent())
           .timestamp(request.timestamp())
           .ttl(request.ttl());
   if (request.opType() == IndexRequest.OpType.INDEX) {
     Engine.Index index =
         indexShard.prepareIndex(
             sourceToParse,
             request.version(),
             request.versionType(),
             Engine.Operation.Origin.REPLICA,
             request.canHaveDuplicates());
     indexShard.index(index);
   } else {
     Engine.Create create =
         indexShard.prepareCreate(
             sourceToParse,
             request.version(),
             request.versionType(),
             Engine.Operation.Origin.REPLICA,
             request.canHaveDuplicates(),
             request.autoGeneratedId());
     indexShard.create(create);
   }
   if (request.refresh()) {
     try {
       indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
     } catch (Exception e) {
       // ignore
     }
   }
 }
Code Example #7
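 // Loads the percolator queries registered for a single index: refresh shard 0 of the
 // percolator index, search for the queries stored under the index name, and hand the
 // collected queries to the percolator.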
 private void loadQueries(String indexName) {
   IndexService indexService = percolatorIndexService();
   IndexShard shard = indexService.shard(0);
   shard.refresh(new Engine.Refresh(true));
   Engine.Searcher searcher = shard.searcher();
   try {
     // create a query to fetch all queries that are registered under the index name
     // (which is the type in the percolator).
     Query query = new DeletionAwareConstantScoreQuery(indexQueriesFilter(indexName));
     QueriesLoaderCollector queries = new QueriesLoaderCollector();
     searcher.searcher().search(query, queries);
     percolator.addQueries(queries.queries());
   } catch (IOException e) {
     throw new PercolatorException(index, "failed to load queries from percolator index", e);
   } finally {
     searcher.release();
   }
 }
Code Example #8
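  // Single-document get on a shard: refresh first only if explicitly requested while realtime
  // get is disabled, then delegate to the shard's get service and wrap the result.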
  @Override
  protected GetResponse shardOperation(GetRequest request, int shardId)
      throws ElasticsearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    IndexShard indexShard = indexService.shardSafe(shardId);

    if (request.refresh() && !request.realtime()) {
      indexShard.refresh(new Engine.Refresh("refresh_flag_get").force(REFRESH_FORCE));
    }

    GetResult result =
        indexShard
            .getService()
            .get(
                request.type(),
                request.id(),
                request.fields(),
                request.realtime(),
                request.version(),
                request.versionType(),
                request.fetchSourceContext());
    return new GetResponse(result);
  }
Code Example #9
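  // Executes an index or create on the primary shard. Required routing is validated against
  // the mapping, dynamic mapping changes detected while parsing are pushed to the master, and
  // the resolved version and version type are written back onto the request for the replicas.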
  @Override
  protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(
      ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    final IndexRequest request = shardRequest.request;

    // validate, if routing is required, that we got routing
    IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
    MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
    if (mappingMd != null && mappingMd.routing().required()) {
      if (request.routing() == null) {
        throw new RoutingMissingException(
            shardRequest.shardId.getIndex(), request.type(), request.id());
      }
    }

    IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
    SourceToParse sourceToParse =
        SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source())
            .type(request.type())
            .id(request.id())
            .routing(request.routing())
            .parent(request.parent())
            .timestamp(request.timestamp())
            .ttl(request.ttl());
    long version;
    boolean created;
    Engine.IndexingOperation op;
    if (request.opType() == IndexRequest.OpType.INDEX) {
      Engine.Index index =
          indexShard.prepareIndex(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates());
      if (index.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), index.docMapper(), indexService.indexUUID());
      }
      indexShard.index(index);
      version = index.version();
      op = index;
      created = index.created();
    } else {
      Engine.Create create =
          indexShard.prepareCreate(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates(),
              request.autoGeneratedId());
      if (create.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), create.docMapper(), indexService.indexUUID());
      }
      indexShard.create(create);
      version = create.version();
      op = create;
      created = true;
    }
    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
      } catch (Throwable e) {
        // ignore
      }
    }

    // update the version on the request, so it will be used for the replicas
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());

    assert request.versionType().validateVersionForWrites(request.version());

    IndexResponse response =
        new IndexResponse(
            shardRequest.shardId.getIndex(), request.type(), request.id(), version, created);
    return new PrimaryResponse<>(shardRequest.request, response, op);
  }
Code Example #10
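  // Get implementation that reads straight from a searcher: the document's docId and version
  // are resolved through the bloom cache and uid term, stored fields are mapped back through
  // the document mapper, and requested fields that are not stored are recovered via mvel
  // _source scripts.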
  @Override
  protected GetResponse shardOperation(GetRequest request, int shardId)
      throws ElasticSearchException {
    IndexService indexService = indicesService.indexServiceSafe(request.index());
    BloomCache bloomCache = indexService.cache().bloomCache();
    IndexShard indexShard = indexService.shardSafe(shardId);

    DocumentMapper docMapper = indexService.mapperService().documentMapper(request.type());
    if (docMapper == null) {
      throw new TypeMissingException(new Index(request.index()), request.type());
    }

    if (request.refresh()) {
      indexShard.refresh(new Engine.Refresh(false));
    }

    Engine.Searcher searcher = indexShard.searcher();
    boolean exists = false;
    byte[] source = null;
    Map<String, GetField> fields = null;
    long version = -1;
    try {
      UidField.DocIdAndVersion docIdAndVersion =
          loadCurrentVersionFromIndex(
              bloomCache, searcher, docMapper.uidMapper().term(request.type(), request.id()));
      if (docIdAndVersion != null && docIdAndVersion.docId != Lucene.NO_DOC) {
        if (docIdAndVersion.version > 0) {
          version = docIdAndVersion.version;
        }
        exists = true;
        FieldSelector fieldSelector = buildFieldSelectors(docMapper, request.fields());
        if (fieldSelector != null) {
          Document doc = docIdAndVersion.reader.document(docIdAndVersion.docId, fieldSelector);
          source = extractSource(doc, docMapper);

          for (Object oField : doc.getFields()) {
            Fieldable field = (Fieldable) oField;
            String name = field.name();
            Object value = null;
            FieldMappers fieldMappers = docMapper.mappers().indexName(field.name());
            if (fieldMappers != null) {
              FieldMapper mapper = fieldMappers.mapper();
              if (mapper != null) {
                name = mapper.names().fullName();
                value = mapper.valueForSearch(field);
              }
            }
            if (value == null) {
              if (field.isBinary()) {
                value = field.getBinaryValue();
              } else {
                value = field.stringValue();
              }
            }

            if (fields == null) {
              fields = newHashMapWithExpectedSize(2);
            }

            GetField getField = fields.get(name);
            if (getField == null) {
              getField = new GetField(name, new ArrayList<Object>(2));
              fields.put(name, getField);
            }
            getField.values().add(value);
          }
        }

        // now evaluate script or _source based fields if needed
        if (request.fields() != null && request.fields().length > 0) {
          SearchLookup searchLookup = null;
          for (String field : request.fields()) {
            String script = null;
            if (field.contains("_source.") || field.contains("doc[")) {
              script = field;
            } else {
              FieldMappers x = docMapper.mappers().smartName(field);
              if (x != null && !x.mapper().stored()) {
                script = "_source." + x.mapper().names().fullName();
              }
            }
            if (script != null) {
              if (searchLookup == null) {
                searchLookup =
                    new SearchLookup(
                        indexService.mapperService(), indexService.cache().fieldData());
              }
              SearchScript searchScript = scriptService.search(searchLookup, "mvel", script, null);
              searchScript.setNextReader(docIdAndVersion.reader);
              searchScript.setNextDocId(docIdAndVersion.docId);

              try {
                Object value = searchScript.run();
                if (fields == null) {
                  fields = newHashMapWithExpectedSize(2);
                }
                GetField getField = fields.get(field);
                if (getField == null) {
                  getField = new GetField(field, new ArrayList<Object>(2));
                  fields.put(field, getField);
                }
                getField.values().add(value);
              } catch (RuntimeException e) {
                if (logger.isTraceEnabled()) {
                  logger.trace("failed to execute get request script field [{}]", e, script);
                }
                // ignore
              }
            }
          }
        }
      }
    } catch (IOException e) {
      throw new ElasticSearchException(
          "Failed to get type [" + request.type() + "] and id [" + request.id() + "]", e);
    } finally {
      searcher.release();
    }
    return new GetResponse(
        request.index(), request.type(), request.id(), version, exists, source, fields);
  }