  public void testV0LegacyTranslogVersion() throws Exception {
    Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v0.binary");
    assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
    try (ImmutableTranslogReader reader = openReader(translogFile, 0)) {
      assertThat(
          "a version0 stream is returned", reader instanceof LegacyTranslogReader, equalTo(true));
      try (final Translog.Snapshot snapshot = reader.newSnapshot()) {
        final Translog.Operation operation = snapshot.next();
        assertThat(
            "operation is the correct type correctly",
            operation.opType() == Translog.Operation.Type.INDEX,
            equalTo(true));
        Translog.Index op = (Translog.Index) operation;
        assertThat(op.id(), equalTo("1"));
        assertThat(op.type(), equalTo("doc"));
        assertThat(
            op.source().toUtf8(),
            equalTo("{\"body\": \"worda wordb wordc wordd \\\"worde\\\" wordf\"}"));
        assertThat(op.routing(), equalTo(null));
        assertThat(op.parent(), equalTo(null));
        assertThat(op.version(), equalTo(1L));
        assertThat(op.timestamp(), equalTo(1407312091791L));
        assertThat(op.ttl(), equalTo(-1L));
        assertThat(op.versionType(), equalTo(VersionType.INTERNAL));

        assertNull(snapshot.next());
      }
    }
  }
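
  // The test above relies on the basic snapshot iteration contract: Translog.Snapshot.next()
  // returns operations in order and null once the snapshot is exhausted. A minimal sketch of that
  // draining loop, factored into a helper (drainSnapshot is an illustrative name, not part of the
  // Elasticsearch API; it assumes the usual java.util imports and that the caller closes the
  // snapshot):
  static List<Translog.Operation> drainSnapshot(Translog.Snapshot snapshot) throws IOException {
    List<Translog.Operation> operations = new ArrayList<>();
    Translog.Operation op;
    while ((op = snapshot.next()) != null) { // next() returns null when no operations remain
      operations.add(op);
    }
    return operations;
  }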
  @Override
  public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
    try (ReleasableLock lock = readLock.acquire()) {
      ensureOpen();
      if (get.realtime()) {
        VersionValue versionValue = versionMap.getUnderLock(get.uid().bytes());
        if (versionValue != null) {
          if (versionValue.delete()) {
            return GetResult.NOT_EXISTS;
          }
          if (get.versionType().isVersionConflictForReads(versionValue.version(), get.version())) {
            Uid uid = Uid.createUid(get.uid().text());
            throw new VersionConflictEngineException(
                shardId,
                uid.type(),
                uid.id(),
                get.versionType().explainConflictForReads(versionValue.version(), get.version()));
          }
          Translog.Operation op = translog.read(versionValue.translogLocation());
          if (op != null) {
            return new GetResult(true, versionValue.version(), op.getSource());
          }
        }
      }

      // no version, get the version from the index, we know that we refresh on flush
      return getFromSearcher(get, searcherFactory);
    }
  }
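
  // The realtime branch above serves a document's source straight from the translog via
  // translog.read(versionValue.translogLocation()), without forcing a refresh. A condensed sketch
  // of just that lookup (sourceFromTranslog is an assumed helper name, not Engine API; the
  // Translog.Location parameter mirrors what versionValue.translogLocation() returns):
  static Translog.Source sourceFromTranslog(Translog translog, Translog.Location location) {
    // read(...) returns null if the location no longer resolves to an operation,
    // in which case the caller falls back to a searcher-based get
    Translog.Operation op = translog.read(location);
    return op != null ? op.getSource() : null;
  }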
  public void testV1ChecksummedTranslogVersion() throws Exception {
    Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1.binary");
    assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
    try (ImmutableTranslogReader reader = openReader(translogFile, 0)) {
      try (final Translog.Snapshot snapshot = reader.newSnapshot()) {

        assertThat(
            "a version1 stream is returned",
            reader instanceof ImmutableTranslogReader,
            equalTo(true));

        Translog.Operation operation = snapshot.next();

        assertThat(
            "operation is the correct type correctly",
            operation.opType() == Translog.Operation.Type.INDEX,
            equalTo(true));
        Translog.Index op = (Translog.Index) operation;
        assertThat(op.id(), equalTo("Bwiq98KFSb6YjJQGeSpeiw"));
        assertThat(op.type(), equalTo("doc"));
        assertThat(op.source().toUtf8(), equalTo("{\"body\": \"foo\"}"));
        assertThat(op.routing(), equalTo(null));
        assertThat(op.parent(), equalTo(null));
        assertThat(op.version(), equalTo(1L));
        assertThat(op.timestamp(), equalTo(1408627184844L));
        assertThat(op.ttl(), equalTo(-1L));
        assertThat(op.versionType(), equalTo(VersionType.INTERNAL));

        // There are more operations
        int opNum = 1;
        while (snapshot.next() != null) {
          opNum++;
        }
        assertThat("there should be 5 translog operations", opNum, equalTo(5));
      }
    }
  }
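
  // The second test counts the remaining operations with the same next()-until-null loop. As an
  // illustrative variant (countByType is an assumed helper, assuming the usual java.util imports),
  // the loop can also tally operations per Translog.Operation.Type:
  static Map<Translog.Operation.Type, Integer> countByType(Translog.Snapshot snapshot)
      throws IOException {
    Map<Translog.Operation.Type, Integer> counts = new EnumMap<>(Translog.Operation.Type.class);
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
      counts.merge(op.opType(), 1, Integer::sum); // opType() is INDEX for the documents above
    }
    return counts;
  }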
  /**
   * Send the given snapshot's operations to this handler's target node.
   *
   * <p>Operations are bulked into a single request and sent once an operation-count limit or a
   * size-in-bytes limit is reached.
   *
   * @return the total number of translog operations that were sent
   */
  protected int sendSnapshot(final Translog.Snapshot snapshot) {
    int ops = 0;
    long size = 0;
    int totalOperations = 0;
    final List<Translog.Operation> operations = new ArrayList<>();
    Translog.Operation operation;
    try {
      operation = snapshot.next(); // this ex should bubble up
    } catch (IOException ex) {
      throw new ElasticsearchException("failed to get next operation from translog", ex);
    }

    if (operation == null) {
      logger.trace(
          "[{}][{}] no translog operations to send to {}",
          indexName,
          shardId,
          request.targetNode());
    }
    while (operation != null) {
      if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
      }
      cancellableThreads.checkForCancel();
      operations.add(operation);
      ops += 1;
      size += operation.estimateSize();
      totalOperations++;

      // Check if this request is past the size-in-bytes threshold, and
      // if so, send it off
      if (size >= chunkSizeInBytes) {

        // don't throttle translog, since we lock for phase3 indexing,
        // so we need to move it as fast as possible. Note, since we
        // index docs to replicas while the index files are recovered
        // the lock can potentially be removed, in which case, it might
        // make sense to re-enable throttling in this phase
        cancellableThreads.execute(
            () -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations()));
        if (logger.isTraceEnabled()) {
          logger.trace(
              "[{}][{}] sent batch of [{}][{}] (total: [{}]) translog operations to {}",
              indexName,
              shardId,
              ops,
              new ByteSizeValue(size),
              snapshot.totalOperations(),
              request.targetNode());
        }

        ops = 0;
        size = 0;
        operations.clear();
      }
      try {
        operation = snapshot.next(); // this ex should bubble up
      } catch (IOException ex) {
        throw new ElasticsearchException("failed to get next operation from translog", ex);
      }
    }
    // send the leftover
    if (!operations.isEmpty()) {
      cancellableThreads.execute(
          () -> recoveryTarget.indexTranslogOperations(operations, snapshot.totalOperations()));
    }
    if (logger.isTraceEnabled()) {
      logger.trace(
          "[{}][{}] sent final batch of [{}][{}] (total: [{}]) translog operations to {}",
          indexName,
          shardId,
          ops,
          new ByteSizeValue(size),
          snapshot.totalOperations(),
          request.targetNode());
    }
    return totalOperations;
  }
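
  // The Javadoc above describes the batching policy: operations are accumulated until a
  // size-in-bytes threshold is crossed, the batch is sent, and any leftover operations are sent at
  // the end. A minimal sketch of that policy in isolation (sendInChunks and the Consumer callback
  // are assumptions standing in for recoveryTarget.indexTranslogOperations and chunkSizeInBytes;
  // List/ArrayList and Consumer come from java.util and java.util.function):
  static int sendInChunks(
      Translog.Snapshot snapshot, long chunkSizeInBytes, Consumer<List<Translog.Operation>> send)
      throws IOException {
    List<Translog.Operation> batch = new ArrayList<>();
    long batchSizeInBytes = 0;
    int totalOperations = 0;
    Translog.Operation op;
    while ((op = snapshot.next()) != null) {
      batch.add(op);
      batchSizeInBytes += op.estimateSize(); // same size accounting as sendSnapshot above
      totalOperations++;
      if (batchSizeInBytes >= chunkSizeInBytes) {
        send.accept(batch);
        batch = new ArrayList<>(); // start a fresh batch rather than clearing the sent one
        batchSizeInBytes = 0;
      }
    }
    if (!batch.isEmpty()) {
      send.accept(batch); // leftover operations that never crossed the threshold
    }
    return totalOperations;
  }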
  /**
   * Send the given snapshot's operations to this handler's target node.
   *
   * <p>Operations are bulked into a single request and sent once an operation-count limit or a
   * size-in-bytes limit is reached.
   *
   * @return the total number of translog operations that were sent
   */
  protected int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
    int ops = 0;
    long size = 0;
    int totalOperations = 0;
    final List<Translog.Operation> operations = Lists.newArrayList();
    Translog.Operation operation = snapshot.next();

    final TransportRequestOptions recoveryOptions =
        TransportRequestOptions.options()
            .withCompress(recoverySettings.compress())
            .withType(TransportRequestOptions.Type.RECOVERY)
            .withTimeout(recoverySettings.internalActionLongTimeout());

    if (operation == null) {
      logger.trace(
          "[{}][{}] no translog operations (id: [{}]) to send to {}",
          indexName,
          shardId,
          snapshot.translogId(),
          request.targetNode());
    }
    while (operation != null) {
      if (shard.state() == IndexShardState.CLOSED) {
        throw new IndexShardClosedException(request.shardId());
      }
      cancellableThreads.checkForCancel();
      operations.add(operation);
      ops += 1;
      size += operation.estimateSize();
      totalOperations++;

      // Check if this request is past the operation-count or size-in-bytes threshold, and
      // if so, send it off
      if (ops >= recoverySettings.translogOps()
          || size >= recoverySettings.translogSize().bytes()) {

        // don't throttle translog, since we lock for phase3 indexing,
        // so we need to move it as fast as possible. Note, since we
        // index docs to replicas while the index files are recovered
        // the lock can potentially be removed, in which case, it might
        // make sense to re-enable throttling in this phase
        //                if (recoverySettings.rateLimiter() != null) {
        //                    recoverySettings.rateLimiter().pause(size);
        //                }

        if (logger.isTraceEnabled()) {
          logger.trace(
              "[{}][{}] sending batch of [{}][{}] (total: [{}], id: [{}]) translog operations to {}",
              indexName,
              shardId,
              ops,
              new ByteSizeValue(size),
              shard.translog().estimatedNumberOfOperations(),
              snapshot.translogId(),
              request.targetNode());
        }
        cancellableThreads.execute(
            new Interruptable() {
              @Override
              public void run() throws InterruptedException {
                final RecoveryTranslogOperationsRequest translogOperationsRequest =
                    new RecoveryTranslogOperationsRequest(
                        request.recoveryId(),
                        request.shardId(),
                        operations,
                        shard.translog().estimatedNumberOfOperations());
                transportService
                    .submitRequest(
                        request.targetNode(),
                        RecoveryTarget.Actions.TRANSLOG_OPS,
                        translogOperationsRequest,
                        recoveryOptions,
                        EmptyTransportResponseHandler.INSTANCE_SAME)
                    .txGet();
              }
            });

        ops = 0;
        size = 0;
        operations.clear();
      }
      operation = snapshot.next();
    }
    // send the leftover
    if (logger.isTraceEnabled()) {
      logger.trace(
          "[{}][{}] sending final batch of [{}][{}] (total: [{}], id: [{}]) translog operations to {}",
          indexName,
          shardId,
          ops,
          new ByteSizeValue(size),
          shard.translog().estimatedNumberOfOperations(),
          snapshot.translogId(),
          request.targetNode());
    }
    if (!operations.isEmpty()) {
      cancellableThreads.execute(
          new Interruptable() {
            @Override
            public void run() throws InterruptedException {
              RecoveryTranslogOperationsRequest translogOperationsRequest =
                  new RecoveryTranslogOperationsRequest(
                      request.recoveryId(),
                      request.shardId(),
                      operations,
                      shard.translog().estimatedNumberOfOperations());
              transportService
                  .submitRequest(
                      request.targetNode(),
                      RecoveryTarget.Actions.TRANSLOG_OPS,
                      translogOperationsRequest,
                      recoveryOptions,
                      EmptyTransportResponseHandler.INSTANCE_SAME)
                  .txGet();
            }
          });
    }
    return totalOperations;
  }