@Test
  public void testVersioningIndexConflictWithFlush() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(1L));

    index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(2L));

    engine.flush(new Engine.Flush());

    index = new Engine.Index(null, newUid("1"), doc).version(1L);
    try {
      engine.index(index);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }

    // future versions should not work either
    index = new Engine.Index(null, newUid("1"), doc).version(3L);
    try {
      engine.index(index);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }
  }
Example #2
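 /**
  * Registers the document's source as a percolator query before it is indexed, when the
  * document's type matches this index's name.
  */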
 @Override
 public Engine.Index beforeIndex(Engine.Index index) {
   if (index.type().equals(index().name())) {
     percolator.addQuery(index.id(), index.source());
   }
   return index;
 }
  /**
   * Execute the given {@link IndexRequest} on a primary shard, throwing a {@link
   * RetryOnPrimaryException} if the operation needs to be retried.
   */
  protected final WriteResult<IndexResponse> executeIndexRequestOnPrimary(
      BulkShardRequest shardRequest, IndexRequest request, IndexShard indexShard) throws Throwable {
    Engine.Index operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    final ShardId shardId = indexShard.shardId();
    if (update != null) {
      final String indexName = shardId.getIndex();
      mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
      operation = prepareIndexOperationOnPrimary(shardRequest, request, indexShard);
      update = operation.parsedDoc().dynamicMappingsUpdate();
      if (update != null) {
        throw new RetryOnPrimaryException(
            shardId, "Dynamics mappings are not available on the node that holds the primary yet");
      }
    }
    final boolean created = indexShard.index(operation);

    // update the version on request so it will happen on the replicas
    final long version = operation.version();
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());

    assert request.versionType().validateVersionForWrites(request.version());

    return new WriteResult<>(
        new IndexResponse(
            shardId.getIndex(), request.type(), request.id(), request.version(), created),
        operation.getTranslogLocation());
  }
Example #4
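 /** Registers a newly indexed percolator query while the document lock is still held. */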
 @Override
 public void postIndexUnderLock(Engine.Index index) {
   // add the query under a doc lock
   if (PercolatorService.TYPE_NAME.equals(index.type())) {
     addPercolateQuery(index.id(), index.source());
   }
 }
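 /**
  * Counts an indexing operation as in-flight in both the total and per-type stats; operations
  * replayed during recovery are excluded.
  */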
 @Override
 public Engine.Index preIndex(Engine.Index operation) {
   if (operation.origin() != Engine.Operation.Origin.RECOVERY) {
     totalStats.indexCurrent.inc();
     typeStats(operation.type()).indexCurrent.inc();
   }
   return operation;
 }
Example #6
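 /**
  * Parses the percolator query document before indexing, so that malformed queries are rejected
  * early.
  */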
 @Override
 public Engine.Index preIndex(Engine.Index index) {
   // validate the query here, before we index
   if (PercolatorService.TYPE_NAME.equals(index.type())) {
     parsePercolatorDocument(index.id(), index.source());
   }
   return index;
 }
  @Test
  public void testVersioningDeleteConflictWithFlush() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(1L));

    index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(2L));

    engine.flush(new Engine.Flush());

    Engine.Delete delete = new Engine.Delete("test", "1", newUid("1")).version(1L);
    try {
      engine.delete(delete);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }

    // future versions should not work either
    delete = new Engine.Delete("test", "1", newUid("1")).version(3L);
    try {
      engine.delete(delete);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }

    engine.flush(new Engine.Flush());

    // now actually delete
    delete = new Engine.Delete("test", "1", newUid("1")).version(2L);
    engine.delete(delete);
    assertThat(delete.version(), equalTo(3L));

    engine.flush(new Engine.Flush());

    // now check that indexing over the deleted doc with a stale version fails
    index = new Engine.Index(null, newUid("1"), doc).version(2L);
    try {
      engine.index(index);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }

    // we shouldn't be able to create it either
    Engine.Create create = new Engine.Create(null, newUid("1"), doc).version(2L);
    try {
      engine.create(create);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }
  }
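 /**
  * Failure hook: decrements the in-flight counters and records the failure in the total and
  * per-type stats.
  */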
 @Override
 public void postIndex(Engine.Index index, Throwable ex) {
   if (index.origin() != Engine.Operation.Origin.RECOVERY) {
     totalStats.indexCurrent.dec();
     typeStats(index.type()).indexCurrent.dec();
     totalStats.indexFailed.inc();
     typeStats(index.type()).indexFailed.inc();
   }
 }
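 /**
  * Success hook: records how long the indexing operation took and decrements the in-flight
  * counters.
  */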
 @Override
 public void postIndex(Engine.Index index, boolean created) {
   if (index.origin() != Engine.Operation.Origin.RECOVERY) {
     long took = index.endTime() - index.startTime();
     totalStats.indexMetric.inc(took);
     totalStats.indexCurrent.dec();
     StatsHolder typeStats = typeStats(index.type());
     typeStats.indexMetric.inc(took);
     typeStats.indexCurrent.dec();
   }
 }
  @Test
  public void testVersioningNewIndex() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(1L));

    index = new Engine.Index(null, newUid("1"), doc).version(index.version()).origin(REPLICA);
    replicaEngine.index(index);
    assertThat(index.version(), equalTo(1L));
  }
  @Test
  public void testVersioningReplicaConflict2() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(1L));

    // apply the first index to the replica, should work fine
    index = new Engine.Index(null, newUid("1"), doc).version(1L).origin(REPLICA);
    replicaEngine.index(index);
    assertThat(index.version(), equalTo(1L));

    // index it again
    index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(2L));

    // now delete it
    Engine.Delete delete = new Engine.Delete("test", "1", newUid("1"));
    engine.delete(delete);
    assertThat(delete.version(), equalTo(3L));

    // apply the delete on the replica (skipping the second index)
    delete = new Engine.Delete("test", "1", newUid("1")).version(3L).origin(REPLICA);
    replicaEngine.delete(delete);
    assertThat(delete.version(), equalTo(3L));

    // a second delete with the same version should fail
    try {
      delete = new Engine.Delete("test", "1", newUid("1")).version(3L).origin(REPLICA);
      replicaEngine.delete(delete);
      assertThat(delete.version(), equalTo(3L));
    } catch (VersionConflictEngineException e) {
      // all is well
    }

    // now do the second index on the replica, it should fail
    try {
      index = new Engine.Index(null, newUid("1"), doc).version(2L).origin(REPLICA);
      replicaEngine.index(index);
      assertThat(index.version(), equalTo(2L));
    } catch (VersionConflictEngineException e) {
      // all is well
    }
  }
  @Test
  public void testCreatedFlagAfterFlush() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertTrue(index.created());

    engine.delete(new Engine.Delete(null, "1", newUid("1")));

    engine.flush(new Engine.Flush());

    index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertTrue(index.created());
  }
  @Test
  public void testExternalVersioningIndexConflict() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index =
        new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(12);
    engine.index(index);
    assertThat(index.version(), equalTo(12L));

    index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(14);
    engine.index(index);
    assertThat(index.version(), equalTo(14L));

    index = new Engine.Index(null, newUid("1"), doc).versionType(VersionType.EXTERNAL).version(13L);
    try {
      engine.index(index);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }
  }
  @Test
  public void testVersioningReplicaConflict1() {
    ParsedDocument doc =
        testParsedDocument(
            "1", "1", "test", null, -1, -1, testDocument(), Lucene.STANDARD_ANALYZER, B_1, false);
    Engine.Index index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(1L));

    index = new Engine.Index(null, newUid("1"), doc);
    engine.index(index);
    assertThat(index.version(), equalTo(2L));

    // apply the second index to the replica, should work fine
    index = new Engine.Index(null, newUid("1"), doc).version(2L).origin(REPLICA);
    replicaEngine.index(index);
    assertThat(index.version(), equalTo(2L));

    // now, the old one should not work
    index = new Engine.Index(null, newUid("1"), doc).version(1L).origin(REPLICA);
    try {
      replicaEngine.index(index);
      assert false;
    } catch (VersionConflictEngineException e) {
      // all is well
    }

    // second version on replica should fail as well
    try {
      index = new Engine.Index(null, newUid("1"), doc).version(2L).origin(REPLICA);
      replicaEngine.index(index);
      assertThat(index.version(), equalTo(2L));
    } catch (VersionConflictEngineException e) {
      // all is well
    }
  }
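  /**
   * Executes a single index request on the primary shard: validates routing, indexes or creates
   * the document (pushing any resulting mapping change to the master), optionally refreshes, and
   * copies the resulting version back onto the request for the replicas.
   */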
  @Override
  protected PrimaryResponse<IndexResponse, IndexRequest> shardOperationOnPrimary(
      ClusterState clusterState, PrimaryOperationRequest shardRequest) {
    final IndexRequest request = shardRequest.request;

    // if routing is required, validate that the request provided a routing value
    IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
    MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
    if (mappingMd != null && mappingMd.routing().required()) {
      if (request.routing() == null) {
        throw new RoutingMissingException(
            shardRequest.shardId.getIndex(), request.type(), request.id());
      }
    }

    IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
    SourceToParse sourceToParse =
        SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source())
            .type(request.type())
            .id(request.id())
            .routing(request.routing())
            .parent(request.parent())
            .timestamp(request.timestamp())
            .ttl(request.ttl());
    long version;
    boolean created;
    Engine.IndexingOperation op;
    if (request.opType() == IndexRequest.OpType.INDEX) {
      Engine.Index index =
          indexShard.prepareIndex(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates());
      if (index.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), index.docMapper(), indexService.indexUUID());
      }
      indexShard.index(index);
      version = index.version();
      op = index;
      created = index.created();
    } else {
      Engine.Create create =
          indexShard.prepareCreate(
              sourceToParse,
              request.version(),
              request.versionType(),
              Engine.Operation.Origin.PRIMARY,
              request.canHaveDuplicates(),
              request.autoGeneratedId());
      if (create.parsedDoc().mappingsModified()) {
        mappingUpdatedAction.updateMappingOnMaster(
            shardRequest.shardId.getIndex(), create.docMapper(), indexService.indexUUID());
      }
      indexShard.create(create);
      version = create.version();
      op = create;
      created = true;
    }
    if (request.refresh()) {
      try {
        indexShard.refresh(new Engine.Refresh("refresh_flag_index").force(false));
      } catch (Throwable e) {
        // ignore
      }
    }

    // update the version on the request, so it will be used for the replicas
    request.version(version);
    request.versionType(request.versionType().versionTypeForReplicationAndRecovery());

    assert request.versionType().validateVersionForWrites(request.version());

    IndexResponse response =
        new IndexResponse(
            shardRequest.shardId.getIndex(), request.type(), request.id(), version, created);
    return new PrimaryResponse<>(shardRequest.request, response, op);
  }
Example #16
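 /**
  * Copies type, id, source, routing, parent and version from an in-flight engine index operation;
  * presumably this builds the corresponding translog entry, though the enclosing class is not
  * shown here.
  */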
 public Index(Engine.Index index) {
   this(index.type(), index.id(), index.source());
   this.routing = index.routing();
   this.parent = index.parent();
   this.version = index.version();
 }
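 /**
  * Executes each item of an ingest bulk request on the primary shard: index/create and delete
  * operations are applied one at a time, the resulting versions are copied back onto the item
  * requests for the replicas, failed items are recorded and nulled out, and any required mapping
  * updates are pushed to the master afterwards.
  */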
 @Override
 protected PrimaryResponse<IngestShardResponse, IngestShardRequest> shardOperationOnPrimary(
     ClusterState clusterState, PrimaryOperationRequest shardRequest) {
   final IngestShardRequest request = shardRequest.request;
   IndexShard indexShard =
       indicesService
           .indexServiceSafe(shardRequest.request.index())
           .shardSafe(shardRequest.shardId);
   int successSize = 0;
   List<IngestItemFailure> failure = newLinkedList();
   int size = request.items().size();
   long[] versions = new long[size];
   Set<Tuple<String, String>> mappingsToUpdate = newHashSet();
   for (int i = 0; i < size; i++) {
     IngestItemRequest item = request.items().get(i);
     if (item.request() instanceof IndexRequest) {
       IndexRequest indexRequest = (IndexRequest) item.request();
       Engine.IndexingOperation op = null;
       try {
          // if routing is required, validate that the request provided a routing value
         MappingMetaData mappingMd =
             clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
         if (mappingMd != null && mappingMd.routing().required()) {
           if (indexRequest.routing() == null) {
             throw new RoutingMissingException(
                 indexRequest.index(), indexRequest.type(), indexRequest.id());
           }
         }
         SourceToParse sourceToParse =
             SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source())
                 .type(indexRequest.type())
                 .id(indexRequest.id())
                 .routing(indexRequest.routing())
                 .parent(indexRequest.parent())
                 .timestamp(indexRequest.timestamp())
                 .ttl(indexRequest.ttl());
         long version;
         if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
           Engine.Index index =
               indexShard
                   .prepareIndex(sourceToParse)
                   .version(indexRequest.version())
                   .versionType(indexRequest.versionType())
                   .origin(Engine.Operation.Origin.PRIMARY);
           op = index;
           indexShard.index(index);
           version = index.version();
         } else {
           Engine.Create create =
               indexShard
                   .prepareCreate(sourceToParse)
                   .version(indexRequest.version())
                   .versionType(indexRequest.versionType())
                   .origin(Engine.Operation.Origin.PRIMARY);
           op = create;
           indexShard.create(create);
           version = create.version();
         }
         versions[i] = indexRequest.version();
         // update the version on request so it will happen on the replicas
         indexRequest.version(version);
         successSize++;
       } catch (Throwable e) {
          // rethrow the failure if we are going to retry on the primary and let the parent
          // failure handler deal with it
         if (retryPrimaryException(e)) {
           // restore updated versions...
           for (int j = 0; j < i; j++) {
             applyVersion(request.items().get(j), versions[j]);
           }
           logger.error(e.getMessage(), e);
           throw new ElasticsearchException(e.getMessage());
         }
          if (e instanceof ElasticsearchException
              && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
            logger.trace(
                "[{}][{}] failed to execute bulk item (index) {}",
                e,
                shardRequest.request.index(),
                shardRequest.shardId,
                indexRequest);
          } else {
            logger.debug(
                "[{}][{}] failed to execute bulk item (index) {}",
                e,
                shardRequest.request.index(),
                shardRequest.shardId,
                indexRequest);
          }
         failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
         // nullify the request so it won't execute on the replicas
         request.items().set(i, null);
       } finally {
          // update the mapping on the master if needed; we don't re-send an update for the same
          // type, since once it's changed it won't have new mappers added
         if (op != null && op.parsedDoc().mappingsModified()) {
           mappingsToUpdate.add(Tuple.tuple(indexRequest.index(), indexRequest.type()));
         }
       }
     } else if (item.request() instanceof DeleteRequest) {
       DeleteRequest deleteRequest = (DeleteRequest) item.request();
       try {
         Engine.Delete delete =
             indexShard
                 .prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version())
                 .versionType(deleteRequest.versionType())
                 .origin(Engine.Operation.Origin.PRIMARY);
         indexShard.delete(delete);
          // update the request with the version so it will go to the replicas
         deleteRequest.version(delete.version());
         successSize++;
       } catch (Throwable e) {
          // rethrow the failure if we are going to retry on the primary and let the parent
          // failure handler deal with it
         if (retryPrimaryException(e)) {
           // restore updated versions...
           for (int j = 0; j < i; j++) {
             applyVersion(request.items().get(j), versions[j]);
           }
           logger.error(e.getMessage(), e);
           throw new ElasticsearchException(e.getMessage());
         }
         if (e instanceof ElasticsearchException
             && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
           logger.trace(
               "[{}][{}] failed to execute bulk item (delete) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               deleteRequest);
         } else {
           logger.debug(
               "[{}][{}] failed to execute bulk item (delete) {}",
               e,
               shardRequest.request.index(),
               shardRequest.shardId,
               deleteRequest);
         }
         failure.add(new IngestItemFailure(item.id(), ExceptionsHelper.detailedMessage(e)));
         // nullify the request so it won't execute on the replicas
         request.items().set(i, null);
       }
     }
   }
   if (!mappingsToUpdate.isEmpty()) {
     for (Tuple<String, String> mappingToUpdate : mappingsToUpdate) {
       logger.info("mapping update {} {}", mappingToUpdate.v1(), mappingToUpdate.v2());
       updateMappingOnMaster(mappingToUpdate.v1(), mappingToUpdate.v2());
     }
   }
   IngestShardResponse response =
       new IngestShardResponse(
           new ShardId(request.index(), request.shardId()), successSize, failure);
   return new PrimaryResponse<IngestShardResponse, IngestShardRequest>(
       shardRequest.request, response, null);
 }