Example #1
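 // Iterates over the files of a snapshotted IndexCommit; the SnapshotDeletionPolicy
 // keeps the snapshotted commit point from being deleted while its files are read.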
 LuceneSnapshotIterator(
     File indexDirectory, IndexCommit snapshotPoint, SnapshotDeletionPolicy deletionPolicy)
     throws IOException {
   this.indexDirectory = indexDirectory;
   this.deletionPolicy = deletionPolicy;
   this.fileNames = snapshotPoint.getFileNames().iterator();
 }
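  /** Renders an IndexCommit as a compact, human-readable string for logging; never throws. */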
  static String str(IndexCommit commit) {
    StringBuilder sb = new StringBuilder();
    try {
      sb.append("commit{");

      Directory dir = commit.getDirectory();

      if (dir instanceof FSDirectory) {
        FSDirectory fsd = (FSDirectory) dir;
        sb.append("dir=").append(fsd.getDirectory());
      } else {
        sb.append("dir=").append(dir);
      }

      sb.append(",segFN=").append(commit.getSegmentsFileName());
      sb.append(",version=").append(commit.getVersion());
      sb.append(",generation=").append(commit.getGeneration());
      sb.append(",filenames=").append(commit.getFileNames());
    } catch (Exception e) {
      sb.append(e);
    }
    return sb.toString();
  }
 @Override
 public Collection<String> getFileNames() throws IOException {
   return delegate.getFileNames();
 }
  /**
   * Perform phase 1 of the recovery operations. Once this {@link IndexCommit} snapshot has been
   * taken, no commit operations (files being fsync'd) are effectively allowed on this index until
   * all recovery phases are done.
   *
   * <p>Phase 1 examines the segment files on the target node and copies over the segments that are
   * missing. Only segments that have the same size and checksum can be reused.
   */
  public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
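    // take a reference on the store so it cannot be closed out from under us while we read files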
    store.incRef();
    try {
      StopWatch stopWatch = new StopWatch().start();
      final Store.MetadataSnapshot recoverySourceMetadata;
      try {
        recoverySourceMetadata = store.getMetadata(snapshot);
      } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        shard.failShard("recovery", ex);
        throw ex;
      }
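      // sanity check: every file referenced by the snapshot commit must exist in the source metadata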
      for (String name : snapshot.getFileNames()) {
        final StoreFileMetaData md = recoverySourceMetadata.get(name);
        if (md == null) {
          logger.info(
              "Snapshot differs from actual index for file: {} meta: {}",
              name,
              recoverySourceMetadata.asMap());
          throw new CorruptIndexException(
              "Snapshot differs from actual index - maybe index was removed metadata has "
                  + recoverySourceMetadata.asMap().size()
                  + " files",
              name);
        }
      }
      // Generate a "diff" of all the identical, different, and missing
      // segment files on the target node, using the existing files on
      // the source node
      String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
      String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
      final boolean recoverWithSyncId =
          recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
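      // a sync id is written by a synced flush; identical ids on source and target mean both
      // copies hold the same documents, so file copying can be skipped entirely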
      if (recoverWithSyncId) {
        final long numDocsTarget = request.metadataSnapshot().getNumDocs();
        final long numDocsSource = recoverySourceMetadata.getNumDocs();
        if (numDocsTarget != numDocsSource) {
          throw new IllegalStateException(
              "try to recover "
                  + request.shardId()
                  + " from primary shard with sync id but number "
                  + "of docs differ: "
                  + numDocsTarget
                  + " ("
                  + request.sourceNode().getName()
                  + ", primary) vs "
                  + numDocsSource
                  + "("
                  + request.targetNode().getName()
                  + ")");
        }
        // we shortcut recovery here because we have nothing to copy. but we must still start the
        // engine on the target.
        // so we don't return here
        logger.trace(
            "[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target",
            indexName,
            shardId,
            request.targetNode(),
            recoverySourceSyncId);
      } else {
        final Store.RecoveryDiff diff =
            recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
        for (StoreFileMetaData md : diff.identical) {
          response.phase1ExistingFileNames.add(md.name());
          response.phase1ExistingFileSizes.add(md.length());
          existingTotalSize += md.length();
          if (logger.isTraceEnabled()) {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}],"
                    + " size [{}]",
                indexName,
                shardId,
                request.targetNode(),
                md.name(),
                md.checksum(),
                md.length());
          }
          totalSize += md.length();
        }
        List<StoreFileMetaData> phase1Files =
            new ArrayList<>(diff.different.size() + diff.missing.size());
        phase1Files.addAll(diff.different);
        phase1Files.addAll(diff.missing);
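        // files that differ or are absent on the target must be transferred in full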
        for (StoreFileMetaData md : phase1Files) {
          if (request.metadataSnapshot().asMap().containsKey(md.name())) {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote "
                    + "[{}], local [{}]",
                indexName,
                shardId,
                request.targetNode(),
                md.name(),
                request.metadataSnapshot().asMap().get(md.name()),
                md);
          } else {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                indexName,
                shardId,
                request.targetNode(),
                md.name());
          }
          response.phase1FileNames.add(md.name());
          response.phase1FileSizes.add(md.length());
          totalSize += md.length();
        }

        response.phase1TotalSize = totalSize;
        response.phase1ExistingTotalSize = existingTotalSize;

        logger.trace(
            "[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with "
                + "total_size [{}]",
            indexName,
            shardId,
            request.targetNode(),
            response.phase1FileNames.size(),
            new ByteSizeValue(totalSize),
            response.phase1ExistingFileNames.size(),
            new ByteSizeValue(existingTotalSize));
        cancellableThreads.execute(
            () ->
                recoveryTarget.receiveFileInfo(
                    response.phase1FileNames,
                    response.phase1FileSizes,
                    response.phase1ExistingFileNames,
                    response.phase1ExistingFileSizes,
                    translogView.totalOperations()));
        // Wrap each file's recovery stream in a buffer so its bytes are sent to the target in
        // chunkSizeInBytes chunks
        final Function<StoreFileMetaData, OutputStream> outputStreamFactories =
            md ->
                new BufferedOutputStream(
                    new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
        sendFiles(
            store,
            phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]),
            outputStreamFactories);
        // Send the CLEAN_FILES request, which takes all of the files that
        // were transferred and renames them from their temporary file
        // names to the actual file names. It also writes checksums for
        // the files after they have been renamed.
        //
        // Once the files have been renamed, any other files that are not
        // related to this recovery (out of date segments, for example)
        // are deleted
        try {
          cancellableThreads.executeIO(
              () ->
                  recoveryTarget.cleanFiles(
                      translogView.totalOperations(), recoverySourceMetadata));
        } catch (RemoteTransportException | IOException targetException) {
          final IOException corruptIndexException;
          // we realized that after the index was copied and we wanted to finalize the recovery
          // the index was corrupted:
          //   - maybe due to a broken segments file on an empty index (transferred with no checksum)
          //   - maybe due to old segments without checksums or length-only checks
          if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException))
              != null) {
            try {
              final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
              StoreFileMetaData[] metadata =
                  StreamSupport.stream(recoverySourceMetadata1.spliterator(), false)
                      .toArray(size -> new StoreFileMetaData[size]);
              ArrayUtil.timSort(
                  metadata,
                  (o1, o2) -> {
                    return Long.compare(o1.length(), o2.length()); // check small files first
                  });
              for (StoreFileMetaData md : metadata) {
                cancellableThreads.checkForCancel();
                logger.debug(
                    "{} checking integrity for file {} after remove corruption exception",
                    shard.shardId(),
                    md);
                if (store.checkIntegrityNoException(md) == false) {
                  // we are corrupted on the primary -- fail!
                  shard.failShard("recovery", corruptIndexException);
                  logger.warn(
                      "{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                  throw corruptIndexException;
                }
              }
            } catch (IOException ex) {
              targetException.addSuppressed(ex);
              throw targetException;
            }
            // corruption has happened on the way to replica
            RemoteTransportException exception =
                new RemoteTransportException(
                    "File corruption occurred on recovery but " + "checksums are ok", null);
            exception.addSuppressed(targetException);
            logger.warn(
                (org.apache.logging.log4j.util.Supplier<?>)
                    () ->
                        new ParameterizedMessage(
                            "{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
                            shard.shardId(),
                            request.targetNode()),
                corruptIndexException);
            throw exception;
          } else {
            throw targetException;
          }
        }
      }

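      // ask the target to open its engine so that phase2 can replay translog operations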
      prepareTargetForTranslog(translogView.totalOperations());

      logger.trace(
          "[{}][{}] recovery [phase1] to {}: took [{}]",
          indexName,
          shardId,
          request.targetNode(),
          stopWatch.totalTime());
      response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
      throw new RecoverFilesRecoveryException(
          request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
      store.decRef();
    }
  }
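
The "diff" above comes from Store.recoveryDiff. As a rough illustration of the rule the Javadoc states (a file may only be reused when its size and checksum match), here is a minimal, self-contained sketch; the FileMeta and RecoveryDiffSketch names are hypothetical, and the real implementation additionally groups files per segment so that a whole segment is either reused or re-sent:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class FileMeta {
  final String name;
  final long length;
  final String checksum;

  FileMeta(String name, long length, String checksum) {
    this.name = name;
    this.length = length;
    this.checksum = checksum;
  }

  // a file is only reusable when both its length and checksum match
  boolean isSame(FileMeta other) {
    return length == other.length && checksum.equals(other.checksum);
  }
}

class RecoveryDiffSketch {
  final List<FileMeta> identical = new ArrayList<>(); // reusable on the target as-is
  final List<FileMeta> different = new ArrayList<>(); // present on the target but stale
  final List<FileMeta> missing = new ArrayList<>();   // absent from the target entirely

  static RecoveryDiffSketch diff(Map<String, FileMeta> source, Map<String, FileMeta> target) {
    RecoveryDiffSketch d = new RecoveryDiffSketch();
    for (FileMeta src : source.values()) {
      FileMeta tgt = target.get(src.name);
      if (tgt == null) {
        d.missing.add(src);
      } else if (src.isSame(tgt)) {
        d.identical.add(src);
      } else {
        d.different.add(src);
      }
    }
    return d;
  }
}

phase1 then records diff.identical as reused and copies different + missing, exactly as in the loops above.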