public void dump(OutputStream outputStream, IndexWriter indexWriter, Lock commitLock)
      throws IOException {

    IndexCommit indexCommit = null;
    String segmentsFileName = null;

    // Lock to block external commits while we record the dump IndexCommit
    commitLock.lock();
    try {
      // Commit all pending changes so the dumped index is complete
      indexWriter.commit();
      // Record the IndexCommit, then release the lock so other dumpers and
      // committers can proceed
      indexCommit = _lastIndexCommit;
      segmentsFileName = indexCommit.getSegmentsFileName();
      _dumpingSegmentsFileNames.add(segmentsFileName);
    } finally {
      commitLock.unlock();
    }

    try {
      IndexCommitSerializationUtil.serializeIndex(indexCommit, outputStream);
    } finally {
      // Clear the dumping segments file name so the old index commit can be
      // removed.
      _dumpingSegmentsFileNames.remove(segmentsFileName);
    }
  }
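
A minimal usage sketch for dump(); the dumpToFile helper and the file target below are assumptions, and the key point is that the same commitLock instance must guard every commit made on this IndexWriter:

  // Hedged usage sketch: serialize the dump to a file. "dumpToFile" is a
  // hypothetical helper, not part of the original class.
  void dumpToFile(File dumpFile, IndexWriter indexWriter, Lock commitLock)
      throws IOException {
    try (OutputStream out = new BufferedOutputStream(new FileOutputStream(dumpFile))) {
      dump(out, indexWriter, commitLock);
    }
  }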
 @Override
 public void delete() {
   Long gen = delegate.getGeneration();
   Long reserve = reserves.get(gen);
   if (reserve != null && System.currentTimeMillis() < reserve) return;
   if (savedCommits.containsKey(gen)) return;
   delegate.delete();
 }
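
A hedged sketch of how a generation could be reserved so that delete() above returns early; the helper name and the 10-second TTL are assumptions, while reserves is the map the method reads:

 // Hypothetical helper: reserve a commit's generation until a deadline.
 // delete() above skips deletion while System.currentTimeMillis() is
 // still before the stored expiry timestamp.
 void reserveCommit(IndexCommit commit) {
   reserves.put(commit.getGeneration(), System.currentTimeMillis() + 10_000L);
 }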
  public void onCommit(List<? extends IndexCommit> list) throws IOException {
    _lastIndexCommit = list.get(list.size() - 1);

    for (int i = 0; i < list.size() - 1; i++) {
      IndexCommit indexCommit = list.get(i);
      if (!_dumpingSegmentsFileNames.contains(indexCommit.getSegmentsFileName())) {
        indexCommit.delete();
      }
    }
  }
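
A hedged wiring sketch showing how such a policy is installed; IndexWriterConfig and setIndexDeletionPolicy are standard Lucene (5.x+ constructor shown), while the policy class name is hypothetical:

  // Install the policy so Lucene calls onCommit() after every commit;
  // older commits are deleted unless their segments file is being dumped.
  IndexWriterConfig config =
      new IndexWriterConfig(analyzer).setIndexDeletionPolicy(new DumpAwareDeletionPolicy());
  IndexWriter writer = new IndexWriter(directory, config);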
Example #4
 LuceneSnapshotIterator(
     File indexDirectory, IndexCommit snapshotPoint, SnapshotDeletionPolicy deletionPolicy)
     throws IOException {
   this.indexDirectory = indexDirectory;
   this.deletionPolicy = deletionPolicy;
   this.fileNames = snapshotPoint.getFileNames().iterator();
 }
 public static long getCommitTimestamp(IndexCommit commit) throws IOException {
   final Map<String, String> commitData = commit.getUserData();
   String commitTime = commitData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
   if (commitTime != null) {
     return Long.parseLong(commitTime);
   } else {
     return 0;
   }
 }
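
A short usage sketch; listing commits via DirectoryReader.listCommits is standard Lucene, and the helper name is an assumption:

 // Hedged sketch: read the Solr commit time of the newest commit in a
 // Directory. getCommitTimestamp() returns 0 when no COMMIT_TIME_MSEC_KEY
 // was recorded in the commit's user data.
 static long newestCommitTime(Directory directory) throws IOException {
   List<IndexCommit> commits = DirectoryReader.listCommits(directory);
   return getCommitTimestamp(commits.get(commits.size() - 1));
 }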
Example #6
  private String getId(IndexCommit commit) {
    StringBuilder sb = new StringBuilder();
    Directory dir = commit.getDirectory();

    // For anything persistent, make something that will
    // be the same, regardless of the Directory instance.
    if (dir instanceof FSDirectory) {
      FSDirectory fsd = (FSDirectory) dir;
      File fdir = fsd.getDirectory();
      sb.append(fdir.getPath());
    } else {
      sb.append(dir);
    }

    sb.append('/');
    sb.append(commit.getGeneration());
    sb.append('_');
    sb.append(commit.getVersion());
    return sb.toString();
  }
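
For illustration, the kind of id this produces for an FSDirectory-backed commit (path, generation, and version are made-up values):

  // <directory path>/<generation>_<version>
  //   /var/data/index/82_1299512345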
Example #7
  private void updateCommits(List<IndexCommit> commits) {
    // to be safe, we should only call delete on a commit point passed to us
    // in this specific call (may be across diff IndexWriter instances).
    // this will happen rarely, so just synchronize everything
    // for safety and to avoid race conditions

    synchronized (this) {
      long maxCommitAgeTimeStamp = -1L;
      IndexCommit newest = commits.get(commits.size() - 1);
      log.info("newest commit = " + newest.getVersion());

      int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
      int totalKept = 1;

      // work our way from newest to oldest, skipping the first since we always want to keep it.
      for (int i = commits.size() - 2; i >= 0; i--) {
        IndexCommit commit = commits.get(i);

        // delete anything too old, regardless of other policies
        try {
          if (maxCommitAge != null) {
            if (maxCommitAgeTimeStamp == -1) {
              DateMathParser dmp = new DateMathParser(DateField.UTC, Locale.US);
              maxCommitAgeTimeStamp = dmp.parseMath(maxCommitAge).getTime();
            }
            if (commit.getTimestamp() < maxCommitAgeTimeStamp) {
              commit.delete();
              continue;
            }
          }
        } catch (Exception e) {
          log.warn("Exception while checking commit point's age for deletion", e);
        }

        if (singleSegKept < maxOptimizedCommitsToKeep && commit.getSegmentCount() == 1) {
          totalKept++;
          singleSegKept++;
          continue;
        }

        if (totalKept < maxCommitsToKeep) {
          totalKept++;
          continue;
        }

        commit.delete();
      }
    } // end synchronized
  }
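
A hedged sketch of how the thresholds read above are typically set; the assignments and values are illustrative assumptions, not the policy's real defaults:

  // Hypothetical configuration for this deletion policy.
  this.maxCommitsToKeep = 1;            // keep only the newest commit
  this.maxOptimizedCommitsToKeep = 0;   // no extra single-segment commits
  this.maxCommitAge = "-1DAY";          // DateMathParser expression; older commits are deleted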
Example #8
  static String str(IndexCommit commit) {
    StringBuilder sb = new StringBuilder();
    try {
      sb.append("commit{");

      Directory dir = commit.getDirectory();

      if (dir instanceof FSDirectory) {
        FSDirectory fsd = (FSDirectory) dir;
        sb.append("dir=").append(fsd.getDirectory());
      } else {
        sb.append("dir=").append(dir);
      }

      sb.append(",segFN=").append(commit.getSegmentsFileName());
      sb.append(",version=").append(commit.getVersion());
      sb.append(",generation=").append(commit.getGeneration());
      sb.append(",filenames=").append(commit.getFileNames());
    } catch (Exception e) {
      sb.append(e);
    }
    return sb.toString();
  }
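
An illustrative example of the string this builds (all values are made up):

  // commit{dir=/var/data/index,segFN=segments_2a,version=1299512345,generation=82,filenames=[segments_2a, _x.cfs]}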
 @Override
 public String getSegmentsFileName() {
   return delegate.getSegmentsFileName();
 }
 @Override
 public Map<String, String> getUserData() throws IOException {
   return delegate.getUserData();
 }
 @Override
 public boolean isDeleted() {
   return delegate.isDeleted();
 }
 @Override
 public long getGeneration() {
   return delegate.getGeneration();
 }
 @Override
 public int hashCode() {
   return delegate.hashCode();
 }
 @Override
 public boolean equals(Object o) {
   return delegate.equals(o);
 }
 @Override
 public int getSegmentCount() {
   return delegate.getSegmentCount();
 }
 @Override
 public Directory getDirectory() {
   return delegate.getDirectory();
 }
 @Override
 public Collection<String> getFileNames() throws IOException {
   return delegate.getFileNames();
 }
  /**
   * Performs phase1 of the recovery operations. Once this {@link IndexCommit} snapshot has been
   * taken, no commit operations (files being fsync'd) are effectively allowed on this index until
   * all recovery phases are done.
   *
   * <p>Phase1 examines the segment files on the target node and copies over the segments that are
   * missing. Only segments that have the same size and checksum can be reused.
   */
  public void phase1(final IndexCommit snapshot, final Translog.View translogView) {
    cancellableThreads.checkForCancel();
    // Total size of segment files that are recovered
    long totalSize = 0;
    // Total size of segment files that were able to be re-used
    long existingTotalSize = 0;
    final Store store = shard.store();
    store.incRef();
    try {
      StopWatch stopWatch = new StopWatch().start();
      final Store.MetadataSnapshot recoverySourceMetadata;
      try {
        recoverySourceMetadata = store.getMetadata(snapshot);
      } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        shard.failShard("recovery", ex);
        throw ex;
      }
      for (String name : snapshot.getFileNames()) {
        final StoreFileMetaData md = recoverySourceMetadata.get(name);
        if (md == null) {
          logger.info(
              "Snapshot differs from actual index for file: {} meta: {}",
              name,
              recoverySourceMetadata.asMap());
          throw new CorruptIndexException(
              "Snapshot differs from actual index - maybe index was removed metadata has "
                  + recoverySourceMetadata.asMap().size()
                  + " files",
              name);
        }
      }
      // Generate a "diff" of all the identical, different, and missing
      // segment files on the target node, using the existing files on
      // the source node
      String recoverySourceSyncId = recoverySourceMetadata.getSyncId();
      String recoveryTargetSyncId = request.metadataSnapshot().getSyncId();
      final boolean recoverWithSyncId =
          recoverySourceSyncId != null && recoverySourceSyncId.equals(recoveryTargetSyncId);
      if (recoverWithSyncId) {
        final long numDocsTarget = request.metadataSnapshot().getNumDocs();
        final long numDocsSource = recoverySourceMetadata.getNumDocs();
        if (numDocsTarget != numDocsSource) {
          throw new IllegalStateException(
              "try to recover "
                  + request.shardId()
                  + " from primary shard with sync id but number "
                  + "of docs differ: "
                  + numDocsTarget
                  + " ("
                  + request.sourceNode().getName()
                  + ", primary) vs "
                  + numDocsSource
                  + "("
                  + request.targetNode().getName()
                  + ")");
        }
        // we shortcut recovery here because we have nothing to copy. but we must still start the
        // engine on the target.
        // so we don't return here
        logger.trace(
            "[{}][{}] skipping [phase1] to {} - identical sync id [{}] found on both source and target",
            indexName,
            shardId,
            request.targetNode(),
            recoverySourceSyncId);
      } else {
        final Store.RecoveryDiff diff =
            recoverySourceMetadata.recoveryDiff(request.metadataSnapshot());
        for (StoreFileMetaData md : diff.identical) {
          response.phase1ExistingFileNames.add(md.name());
          response.phase1ExistingFileSizes.add(md.length());
          existingTotalSize += md.length();
          if (logger.isTraceEnabled()) {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: not recovering [{}], exists in local store and has checksum [{}],"
                    + " size [{}]",
                indexName,
                shardId,
                request.targetNode(),
                md.name(),
                md.checksum(),
                md.length());
          }
          totalSize += md.length();
        }
        List<StoreFileMetaData> phase1Files =
            new ArrayList<>(diff.different.size() + diff.missing.size());
        phase1Files.addAll(diff.different);
        phase1Files.addAll(diff.missing);
        for (StoreFileMetaData md : phase1Files) {
          if (request.metadataSnapshot().asMap().containsKey(md.name())) {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: recovering [{}], exists in local store, but is different: remote "
                    + "[{}], local [{}]",
                indexName,
                shardId,
                request.targetNode(),
                md.name(),
                request.metadataSnapshot().asMap().get(md.name()),
                md);
          } else {
            logger.trace(
                "[{}][{}] recovery [phase1] to {}: recovering [{}], does not exists in remote",
                indexName,
                shardId,
                request.targetNode(),
                md.name());
          }
          response.phase1FileNames.add(md.name());
          response.phase1FileSizes.add(md.length());
          totalSize += md.length();
        }

        response.phase1TotalSize = totalSize;
        response.phase1ExistingTotalSize = existingTotalSize;

        logger.trace(
            "[{}][{}] recovery [phase1] to {}: recovering_files [{}] with total_size [{}], reusing_files [{}] with "
                + "total_size [{}]",
            indexName,
            shardId,
            request.targetNode(),
            response.phase1FileNames.size(),
            new ByteSizeValue(totalSize),
            response.phase1ExistingFileNames.size(),
            new ByteSizeValue(existingTotalSize));
        cancellableThreads.execute(
            () ->
                recoveryTarget.receiveFileInfo(
                    response.phase1FileNames,
                    response.phase1FileSizes,
                    response.phase1ExistingFileNames,
                    response.phase1ExistingFileSizes,
                    translogView.totalOperations()));
        // Wrap each file in a buffered RecoveryOutputStream so writes are
        // sent in chunks of chunkSizeInBytes
        final Function<StoreFileMetaData, OutputStream> outputStreamFactories =
            md ->
                new BufferedOutputStream(
                    new RecoveryOutputStream(md, translogView), chunkSizeInBytes);
        sendFiles(
            store,
            phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]),
            outputStreamFactories);
        // Send the CLEAN_FILES request, which takes all of the files that
        // were transferred and renames them from their temporary file
        // names to the actual file names. It also writes checksums for
        // the files after they have been renamed.
        //
        // Once the files have been renamed, any other files that are not
        // related to this recovery (out of date segments, for example)
        // are deleted
        try {
          cancellableThreads.executeIO(
              () ->
                  recoveryTarget.cleanFiles(
                      translogView.totalOperations(), recoverySourceMetadata));
        } catch (RemoteTransportException | IOException targetException) {
          final IOException corruptIndexException;
          // we realized that after the index was copied and we wanted to finalize the recovery,
          // the index was corrupted:
          //   - maybe due to a broken segments file on an empty index (transferred with no checksum)
          //   - maybe due to old segments without checksums or length-only checks
          if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(targetException))
              != null) {
            try {
              final Store.MetadataSnapshot recoverySourceMetadata1 = store.getMetadata(snapshot);
              StoreFileMetaData[] metadata =
                  StreamSupport.stream(recoverySourceMetadata1.spliterator(), false)
                      .toArray(size -> new StoreFileMetaData[size]);
              ArrayUtil.timSort(
                  metadata,
                  (o1, o2) -> {
                    return Long.compare(o1.length(), o2.length()); // check small files first
                  });
              for (StoreFileMetaData md : metadata) {
                cancellableThreads.checkForCancel();
                logger.debug(
                    "{} checking integrity for file {} after remove corruption exception",
                    shard.shardId(),
                    md);
                if (store.checkIntegrityNoException(md)
                    == false) { // we are corrupted on the primary -- fail!
                  shard.failShard("recovery", corruptIndexException);
                  logger.warn(
                      "{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                  throw corruptIndexException;
                }
              }
            } catch (IOException ex) {
              targetException.addSuppressed(ex);
              throw targetException;
            }
            // corruption has happened on the way to replica
            RemoteTransportException exception =
                new RemoteTransportException(
                    "File corruption occurred on recovery but " + "checksums are ok", null);
            exception.addSuppressed(targetException);
            logger.warn(
                (org.apache.logging.log4j.util.Supplier<?>)
                    () ->
                        new ParameterizedMessage(
                            "{} Remote file corruption during finalization of recovery on node {}. local checksum OK",
                            shard.shardId(),
                            request.targetNode()),
                corruptIndexException);
            throw exception;
          } else {
            throw targetException;
          }
        }
      }

      prepareTargetForTranslog(translogView.totalOperations());

      logger.trace(
          "[{}][{}] recovery [phase1] to {}: took [{}]",
          indexName,
          shardId,
          request.targetNode(),
          stopWatch.totalTime());
      response.phase1Time = stopWatch.totalTime().millis();
    } catch (Exception e) {
      throw new RecoverFilesRecoveryException(
          request.shardId(), response.phase1FileNames.size(), new ByteSizeValue(totalSize), e);
    } finally {
      store.decRef();
    }
  }
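
For orientation, a simplified sketch of the identical/different/missing partition that Store.recoveryDiff produces for phase1; this is illustrative only (the real implementation also groups files per segment and handles commit metadata specially):

      // Simplified illustration, not the actual Store.RecoveryDiff code.
      List<StoreFileMetaData> identical = new ArrayList<>();
      List<StoreFileMetaData> different = new ArrayList<>();
      List<StoreFileMetaData> missing = new ArrayList<>();
      for (StoreFileMetaData source : sourceMetadata) {
        StoreFileMetaData target = targetMap.get(source.name());
        if (target == null) {
          missing.add(source);                // absent on the target: copy it
        } else if (target.isSame(source)) {   // same length and checksum: reuse
          identical.add(source);
        } else {
          different.add(source);              // present but changed: copy it
        }
      }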
 @Override
 public boolean isOptimized() {
   return delegate.isOptimized();
 }
 @Override
 public long getVersion() {
   return delegate.getVersion();
 }
 @Override
 public long getTimestamp() throws IOException {
   return delegate.getTimestamp();
 }