Example #1
  @Test
  public void testCleanUpWithLegacyChecksums() throws IOException {
    Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
    metaDataMap.put(
        "segments_1",
        new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[] {1})));
    metaDataMap.put(
        "_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
    Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(metaDataMap);

    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    for (String file : metaDataMap.keySet()) {
      try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        CodecUtil.writeFooter(output);
      }
    }

    store.verifyAfterCleanup(snapshot, snapshot);
    store.deleteContent();
    IOUtils.close(store);
  }
Example #2
  public void testOnCloseCallback() throws IOException {
    final ShardId shardId =
        new ShardId(
            new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10)),
            randomIntBetween(0, 100));
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    final AtomicInteger count = new AtomicInteger(0);
    final ShardLock lock = new DummyShardLock(shardId);

    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            lock,
            new Store.OnClose() {
              @Override
              public void handle(ShardLock theLock) {
                assertEquals(shardId, theLock.getShardId());
                assertEquals(lock, theLock);
                count.incrementAndGet();
              }
            });
    assertEquals(0, count.get());

    final int iters = randomIntBetween(1, 10);
    for (int i = 0; i < iters; i++) {
      store.close();
    }

    assertEquals(1, count.get());
  }
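The handler above runs exactly once even though close() is called repeatedly: the store is reference counted, repeated close() calls release the initial reference at most once, and the on-close hook fires only when the count drops to zero. A minimal, self-contained sketch of that pattern (SimpleRefCounted is illustrative, not the Elasticsearch class):

  import java.util.concurrent.atomic.AtomicInteger;

  // Illustrative stand-in for the ref-counting contract the test exercises.
  final class SimpleRefCounted {
    private final AtomicInteger refCount = new AtomicInteger(1); // the initial reference
    private final Runnable onClose;

    SimpleRefCounted(Runnable onClose) {
      this.onClose = onClose;
    }

    void incRef() {
      if (refCount.getAndIncrement() <= 0) {
        throw new IllegalStateException("already closed");
      }
    }

    void decRef() {
      if (refCount.decrementAndGet() == 0) {
        onClose.run(); // runs exactly once, on the last release
      }
    }

    public static void main(String[] args) {
      SimpleRefCounted store = new SimpleRefCounted(() -> System.out.println("closed"));
      store.incRef(); // e.g. an ongoing recovery holds a reference
      store.decRef(); // close() amounts to releasing the initial reference
      store.decRef(); // last reference released -> "closed" prints exactly once
    }
  }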
 private IndexWriter createWriter() throws IOException {
   IndexWriter indexWriter = null;
   try {
     // release locks when started
     if (IndexWriter.isLocked(store.directory())) {
       logger.warn("shard is locked, releasing lock");
       IndexWriter.unlock(store.directory());
     }
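     // open in create mode only when no index exists yet; otherwise append to it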
     boolean create = !IndexReader.indexExists(store.directory());
     indexWriter =
         new IndexWriter(
             store.directory(),
             analysisService.defaultIndexAnalyzer(),
             create,
             deletionPolicy,
             IndexWriter.MaxFieldLength.UNLIMITED);
     indexWriter.setMergeScheduler(mergeScheduler.newMergeScheduler());
     indexWriter.setMergePolicy(mergePolicyProvider.newMergePolicy(indexWriter));
     indexWriter.setSimilarity(similarityService.defaultIndexSimilarity());
     indexWriter.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
     indexWriter.setTermIndexInterval(termIndexInterval);
   } catch (IOException e) {
     safeClose(indexWriter);
     throw e;
   }
   return indexWriter;
 }
  @Inject
  public LogByteSizeMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
    super(store.shardId(), store.indexSettings());
    Preconditions.checkNotNull(store, "Store must be provided to merge policy");
    this.indexSettingsService = indexSettingsService;

    this.compoundFormat =
        indexSettings.getAsBoolean("index.compound_format", store.suggestUseCompoundFile());
    this.minMergeSize =
        componentSettings.getAsBytesSize(
            "min_merge_size",
            new ByteSizeValue(
                (long) (LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024),
                ByteSizeUnit.BYTES));
    this.maxMergeSize =
        componentSettings.getAsBytesSize(
            "max_merge_size",
            new ByteSizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, ByteSizeUnit.MB));
    this.mergeFactor =
        componentSettings.getAsInt("merge_factor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR);
    this.maxMergeDocs =
        componentSettings.getAsInt("max_merge_docs", LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS);
    this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true);
    this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
    logger.debug(
        "using [log_bytes_size] merge policy with merge_factor[{}], min_merge_size[{}], max_merge_size[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]",
        mergeFactor,
        minMergeSize,
        maxMergeSize,
        maxMergeDocs,
        calibrateSizeByDeletes,
        asyncMerge);

    indexSettingsService.addListener(applySettings);
  }
 @Override
 public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
      // First, move the files that were created with the recovery id suffix to their actual
      // names. It's OK if we have a corrupted index here, since we have replicas to recover
      // from in case of a full cluster shutdown just when this code executes...
      // We have to delete the shard state first: even if the rename fails, the shard might
      // already be invalid.
      recoveryStatus.indexShard().deleteShardState();
     recoveryStatus.renameAllTempFiles();
     final Store store = recoveryStatus.store();
     // now write checksums
     recoveryStatus.legacyChecksums().write(store);
     Store.MetadataSnapshot sourceMetaData = request.sourceMetaSnapshot();
     try {
       store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
     } catch (CorruptIndexException
         | IndexFormatTooNewException
         | IndexFormatTooOldException ex) {
        // This is a fatal exception at this stage: we transferred files from the remote that
        // have not been checksummed and are broken. We have to clean up this shard entirely,
        // remove all files, and bubble the failure up to the source shard, since the index
        // might be broken there as well. The source can handle this and checks its content
        // on disk if possible.
       try {
         try {
           store.removeCorruptionMarker();
         } finally {
           Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files
         }
       } catch (Throwable e) {
         logger.debug("Failed to clean lucene index", e);
         ex.addSuppressed(e);
       }
       RecoveryFailedException rfe =
           new RecoveryFailedException(
               recoveryStatus.state(), "failed to clean after recovery", ex);
       recoveryStatus.fail(rfe, true);
       throw rfe;
     } catch (Exception ex) {
       RecoveryFailedException rfe =
           new RecoveryFailedException(
               recoveryStatus.state(), "failed to clean after recovery", ex);
       recoveryStatus.fail(rfe, true);
       throw rfe;
     }
     channel.sendResponse(TransportResponse.Empty.INSTANCE);
   }
 }
Example #6
 public void assertDeleteContent(Store store, DirectoryService service) throws IOException {
   store.deleteContent();
   assertThat(
       Arrays.toString(store.directory().listAll()),
       store.directory().listAll().length,
       equalTo(0));
    assertThat(store.stats().sizeInBytes(), equalTo(0L));
   for (Directory dir : service.build()) {
     assertThat(dir.listAll().length, equalTo(0));
   }
 }
 @Override
 public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     final Store store = recoveryStatus.store();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
     final RecoveryState.Index indexState = recoveryStatus.state().getIndex();
     if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
       indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
     }
     IndexOutput indexOutput;
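      // position 0 marks the first chunk of this file: open a fresh temporary output and
      // register it; later chunks look up the output that was opened for this file name.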
     if (request.position() == 0) {
       indexOutput =
           recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
     } else {
       indexOutput = recoveryStatus.getOpenIndexOutput(request.name());
     }
     BytesReference content = request.content();
     if (!content.hasArray()) {
       content = content.toBytesArray();
     }
     RateLimiter rl = recoverySettings.rateLimiter();
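      // Throttle recovery traffic: accumulate chunk sizes and pause only once enough bytes
      // have been written since the last pause, so small chunks don't trigger constant sleeps.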
     if (rl != null) {
       long bytes = bytesSinceLastPause.addAndGet(content.length());
       if (bytes > rl.getMinPauseCheckBytes()) {
         // Time to pause
         bytesSinceLastPause.addAndGet(-bytes);
         long throttleTimeInNanos = rl.pause(bytes);
         indexState.addTargetThrottling(throttleTimeInNanos);
         recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
       }
     }
     indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
     indexState.addRecoveredBytesToFile(request.name(), content.length());
     if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) {
       try {
         Store.verify(indexOutput);
       } finally {
         // we are done
         indexOutput.close();
       }
       // write the checksum
       recoveryStatus.legacyChecksums().add(request.metadata());
       final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name());
       assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
       store.directory().sync(Collections.singleton(temporaryFileName));
       IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name());
        assert remove == null || remove == indexOutput; // remove may be null if we already finished
     }
   }
   channel.sendResponse(TransportResponse.Empty.INSTANCE);
 }
 @BeforeMethod
 public void setUp() throws Exception {
   threadPool = new ThreadPool();
   store = createStore();
   store.deleteContent();
   storeReplica = createStoreReplica();
   storeReplica.deleteContent();
   engine = createEngine(store, createTranslog());
   engine.start();
   replicaEngine = createEngine(storeReplica, createTranslogReplica());
   replicaEngine.start();
 }
  @AfterMethod
  public void tearDown() throws Exception {
    replicaEngine.close();
    storeReplica.close();

    engine.close();
    store.close();

    if (threadPool != null) {
      threadPool.shutdownNow();
    }
  }
 void sendFiles(
     Store store,
     StoreFileMetaData[] files,
     Function<StoreFileMetaData, OutputStream> outputStreamFactory)
     throws Exception {
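    // Hold a reference for the duration of the transfer so the store (and its directory)
    // cannot be closed underneath us; released in the finally block below.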
   store.incRef();
   try {
     ArrayUtil.timSort(
         files, (a, b) -> Long.compare(a.length(), b.length())); // send smallest first
     for (int i = 0; i < files.length; i++) {
       final StoreFileMetaData md = files[i];
       try (final IndexInput indexInput =
           store.directory().openInput(md.name(), IOContext.READONCE)) {
          // It's fine that only the indexInput is inside the try-with-resources block: the
          // copy method handles exceptions during close correctly and doesn't hide the
          // original exception.
         Streams.copy(
             new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md));
       } catch (Exception e) {
         final IOException corruptIndexException;
         if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
           if (store.checkIntegrityNoException(md)
               == false) { // we are corrupted on the primary -- fail!
             logger.warn("{} Corrupted file detected {} checksum mismatch", shardId, md);
             failEngine(corruptIndexException);
             throw corruptIndexException;
           } else { // corruption has happened on the way to replica
             RemoteTransportException exception =
                 new RemoteTransportException(
                     "File corruption occurred on recovery but " + "checksums are ok", null);
             exception.addSuppressed(e);
             logger.warn(
                 (org.apache.logging.log4j.util.Supplier<?>)
                     () ->
                         new ParameterizedMessage(
                             "{} Remote file corruption on node {}, recovering {}. local checksum OK",
                             shardId,
                             request.targetNode(),
                             md),
                 corruptIndexException);
             throw exception;
           }
         } else {
           throw e;
         }
       }
     }
   } finally {
     store.decRef();
   }
 }
 private long newTransactionLogId() throws IOException {
   try {
     return IndexWriters.rollbackSegmentInfos(indexWriter).getVersion();
   } catch (Exception e) {
     return IndexReader.getCurrentVersion(store.directory());
   }
 }
 private void closeShard(
     String reason, ShardId sId, IndexShard indexShard, Store store, IndexEventListener listener) {
   final int shardId = sId.id();
   final Settings indexSettings = this.getIndexSettings().getSettings();
   try {
     try {
       listener.beforeIndexShardClosed(sId, indexShard, indexSettings);
     } finally {
       // this logic is tricky, we want to close the engine so we rollback the changes done to it
       // and close the shard so no operations are allowed to it
       if (indexShard != null) {
         try {
            // only flush if we are closed (closed index or shutdown) and if we are not deleted
           final boolean flushEngine = deleted.get() == false && closed.get();
           indexShard.close(reason, flushEngine);
         } catch (Exception e) {
           logger.debug("[{}] failed to close index shard", e, shardId);
           // ignore
         }
       }
       // call this before we close the store, so we can release resources for it
       listener.afterIndexShardClosed(sId, indexShard, indexSettings);
     }
   } finally {
     try {
       store.close();
     } catch (Exception e) {
       logger.warn(
           "[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason);
     }
   }
 }
 private StoreFilesMetaData listStoreMetaData(ShardId shardId) throws IOException {
   logger.trace("listing store meta data for {}", shardId);
   long startTimeNS = System.nanoTime();
   boolean exists = false;
   try {
     IndexService indexService = indicesService.indexService(shardId.index().name());
     if (indexService != null) {
       IndexShard indexShard = indexService.shard(shardId.id());
       if (indexShard != null) {
         final Store store = indexShard.store();
         store.incRef();
         try {
           exists = true;
           return new StoreFilesMetaData(true, shardId, store.getMetadataOrEmpty());
         } finally {
           store.decRef();
         }
       }
     }
      // try and see if we can list unallocated
     IndexMetaData metaData = clusterService.state().metaData().index(shardId.index().name());
     if (metaData == null) {
       return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
     }
     String storeType = metaData.getSettings().get(IndexStoreModule.STORE_TYPE, "fs");
     if (!storeType.contains("fs")) {
       return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
     }
     final ShardPath shardPath =
         ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.getSettings());
     if (shardPath == null) {
       return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
     }
     return new StoreFilesMetaData(
         false, shardId, Store.readMetadataSnapshot(shardPath.resolveIndex(), logger));
   } finally {
     TimeValue took = new TimeValue(System.nanoTime() - startTimeNS, TimeUnit.NANOSECONDS);
     if (exists) {
       logger.debug("{} loaded store meta data (took [{}])", shardId, took);
     } else {
       logger.trace("{} didn't find any store meta data to load (took [{}])", shardId, took);
     }
   }
 }
  public static void main(String[] args) throws Exception {
    ShardId shardId = new ShardId(new Index("index"), 1);
    Settings settings = EMPTY_SETTINGS;

    //        Store store = new RamStore(shardId, settings);
    Store store = new ByteBufferStore(shardId, settings, null, new ByteBufferCache(settings));
    //        Store store = new NioFsStore(shardId, settings);

    store.deleteContent();

    ThreadPool threadPool = new ScalingThreadPool();
    SnapshotDeletionPolicy deletionPolicy =
        new SnapshotDeletionPolicy(new KeepOnlyLastDeletionPolicy(shardId, settings));
    Engine engine =
        new RobinEngine(
            shardId,
            settings,
            store,
            deletionPolicy,
            new MemoryTranslog(shardId, settings),
            new LogByteSizeMergePolicyProvider(store),
            new ConcurrentMergeSchedulerProvider(shardId, settings),
            new AnalysisService(shardId.index()),
            new SimilarityService(shardId.index()));
    engine.start();

    SimpleEngineBenchmark benchmark =
        new SimpleEngineBenchmark(store, engine)
            .numberOfContentItems(1000)
            .searcherThreads(50)
            .searcherIterations(10000)
            .writerThreads(10)
            .writerIterations(10000)
            .refreshSchedule(new TimeValue(1, TimeUnit.SECONDS))
            .flushSchedule(new TimeValue(1, TimeUnit.MINUTES))
            .create(false)
            .build();

    benchmark.run();

    engine.close();
    store.close();
    threadPool.shutdown();
  }
 @Before
 public void setUp() throws Exception {
   super.setUp();
   defaultSettings =
       ImmutableSettings.builder()
           .put(RobinEngine.INDEX_COMPOUND_ON_FLUSH, getRandom().nextBoolean())
           .build(); // TODO randomize more settings
   threadPool = new ThreadPool();
   store = createStore();
   store.deleteContent();
   storeReplica = createStoreReplica();
   storeReplica.deleteContent();
   engineSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
   engine = createEngine(engineSettingsService, store, createTranslog());
   engine.start();
   replicaSettingsService = new IndexSettingsService(shardId.index(), EMPTY_SETTINGS);
   replicaEngine = createEngine(replicaSettingsService, storeReplica, createTranslogReplica());
   replicaEngine.start();
 }
  public void run() throws Exception {
    for (Thread t : searcherThreads) {
      t.start();
    }
    for (Thread t : writerThreads) {
      t.start();
    }
    barrier1.await();

    Refresher refresher = new Refresher();
    scheduledExecutorService.scheduleWithFixedDelay(
        refresher, refreshSchedule.millis(), refreshSchedule.millis(), TimeUnit.MILLISECONDS);
    Flusher flusher = new Flusher();
    scheduledExecutorService.scheduleWithFixedDelay(
        flusher, flushSchedule.millis(), flushSchedule.millis(), TimeUnit.MILLISECONDS);

    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    barrier2.await();

    latch.await();
    stopWatch.stop();

    System.out.println("Summary");
    System.out.println(
        "   -- Readers ["
            + searcherThreads.length
            + "] with ["
            + searcherIterations
            + "] iterations");
    System.out.println(
        "   -- Writers [" + writerThreads.length + "] with [" + writerIterations + "] iterations");
    System.out.println("   -- Took: " + stopWatch.totalTime());
    System.out.println(
        "   -- Refresh [" + refresher.id + "] took: " + refresher.stopWatch.totalTime());
    System.out.println("   -- Flush [" + flusher.id + "] took: " + flusher.stopWatch.totalTime());
    System.out.println("   -- Store size " + store.estimateSize());

    scheduledExecutorService.shutdown();

    engine.refresh(new Engine.Refresh(true));
    stopWatch = new StopWatch();
    stopWatch.start();
    Engine.Searcher searcher = engine.searcher();
    TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), idGenerator.get() + 1);
    stopWatch.stop();
    System.out.println(
        "   -- Indexed ["
            + idGenerator.get()
            + "] docs, found ["
            + topDocs.totalHits
            + "] hits, took "
            + stopWatch.totalTime());
    searcher.release();
  }
 public static void checkIndex(ESLogger logger, Store store, ShardId shardId) {
   if (store.tryIncRef()) {
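      // tryIncRef() fails if the store is already closed; in that case the shard is gone
      // and there is nothing left to check.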
     logger.info("start check index");
     try {
       Directory dir = store.directory();
       if (!Lucene.indexExists(dir)) {
         return;
       }
       if (IndexWriter.isLocked(dir)) {
         ESTestCase.checkIndexFailed = true;
         throw new IllegalStateException("IndexWriter is still open on shard " + shardId);
       }
       try (CheckIndex checkIndex = new CheckIndex(dir)) {
         BytesStreamOutput os = new BytesStreamOutput();
         PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
         checkIndex.setInfoStream(out);
         out.flush();
         CheckIndex.Status status = checkIndex.checkIndex();
         if (!status.clean) {
           ESTestCase.checkIndexFailed = true;
           logger.warn(
               "check index [failure] index files={}\n{}",
               Arrays.toString(dir.listAll()),
               new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
           throw new IOException("index check failure");
         } else {
           if (logger.isDebugEnabled()) {
             logger.debug(
                 "check index [success]\n{}",
                 new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
           }
         }
       }
     } catch (Exception e) {
       logger.warn("failed to check index", e);
     } finally {
       logger.info("end check index");
       store.decRef();
     }
   }
 }
Example #18
 @Test
 public void testVerifyingIndexOutput() throws IOException {
   Directory dir = newDirectory();
   IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
   int iters = scaledRandomIntBetween(10, 100);
   for (int i = 0; i < iters; i++) {
     BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
     output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
   }
   CodecUtil.writeFooter(output);
   output.close();
   IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
   String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput));
   indexInput.seek(0);
   BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024));
   long length = indexInput.length();
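    // The verifying output recomputes the checksum as bytes are copied into it;
    // Store.verify(...) then compares it against the checksum expected from the metadata.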
   IndexOutput verifyingOutput =
       new Store.LuceneVerifyingIndexOutput(
           new StoreFileMetaData("foo1.bar", length, checksum),
           dir.createOutput("foo1.bar", IOContext.DEFAULT));
   while (length > 0) {
     if (random().nextInt(10) == 0) {
       verifyingOutput.writeByte(indexInput.readByte());
       length--;
     } else {
       int min = (int) Math.min(length, ref.bytes.length);
       indexInput.readBytes(ref.bytes, ref.offset, min);
       verifyingOutput.writeBytes(ref.bytes, ref.offset, min);
       length -= min;
     }
   }
   Store.verify(verifyingOutput);
   verifyingOutput.writeByte((byte) 0x0);
   try {
     Store.verify(verifyingOutput);
     fail("should be a corrupted index");
   } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
     // ok
   }
   IOUtils.close(indexInput, verifyingOutput, dir);
 }
  @Inject
  public TieredMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
    super(store.shardId(), store.indexSettings());
    this.indexSettingsService = indexSettingsService;

    this.compoundFormat =
        indexSettings.getAsBoolean("index.compound_format", store.suggestUseCompoundFile());
    this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
    this.forceMergeDeletesPctAllowed =
        componentSettings.getAsDouble("expunge_deletes_allowed", 10d); // percentage
    this.floorSegment =
        componentSettings.getAsBytesSize("floor_segment", new ByteSizeValue(2, ByteSizeUnit.MB));
    this.maxMergeAtOnce = componentSettings.getAsInt("max_merge_at_once", 10);
    this.maxMergeAtOnceExplicit = componentSettings.getAsInt("max_merge_at_once_explicit", 30);
    // TODO is this really a good default number for max_merge_segment, what happens for large
    // indices, won't they end up with many segments?
    this.maxMergedSegment =
        componentSettings.getAsBytesSize(
            "max_merged_segment",
            componentSettings.getAsBytesSize(
                "max_merge_segment", new ByteSizeValue(5, ByteSizeUnit.GB)));
    this.segmentsPerTier = componentSettings.getAsDouble("segments_per_tier", 10.0d);
    this.reclaimDeletesWeight = componentSettings.getAsDouble("reclaim_deletes_weight", 2.0d);

    fixSettingsIfNeeded();

    logger.debug(
        "using [tiered] merge policy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], reclaim_deletes_weight[{}], async_merge[{}]",
        forceMergeDeletesPctAllowed,
        floorSegment,
        maxMergeAtOnce,
        maxMergeAtOnceExplicit,
        maxMergedSegment,
        segmentsPerTier,
        reclaimDeletesWeight,
        asyncMerge);

    indexSettingsService.addListener(applySettings);
  }
Example #20
  @Test
  public void testVerifyingIndexInput() throws IOException {
    Directory dir = newDirectory();
    IndexOutput output = dir.createOutput("foo.bar", IOContext.DEFAULT);
    int iters = scaledRandomIntBetween(10, 100);
    for (int i = 0; i < iters; i++) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
    }
    CodecUtil.writeFooter(output);
    output.close();

    // Check file
    IndexInput indexInput = dir.openInput("foo.bar", IOContext.DEFAULT);
    long checksum = CodecUtil.retrieveChecksum(indexInput);
    indexInput.seek(0);
    IndexInput verifyingIndexInput =
        new Store.VerifyingIndexInput(dir.openInput("foo.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    Store.verify(verifyingIndexInput);
    assertThat(checksum, equalTo(((ChecksumIndexInput) verifyingIndexInput).getChecksum()));
    IOUtils.close(indexInput, verifyingIndexInput);

    // Corrupt file and check again
    corruptFile(dir, "foo.bar", "foo1.bar");
    verifyingIndexInput =
        new Store.VerifyingIndexInput(dir.openInput("foo1.bar", IOContext.DEFAULT));
    readIndexInputFullyWithRandomSeeks(verifyingIndexInput);
    try {
      Store.verify(verifyingIndexInput);
      fail("should be a corrupted index");
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
      // ok
    }
    IOUtils.close(verifyingIndexInput);
    IOUtils.close(dir);
  }
Example #21
 public static void assertConsistent(Store store, Store.MetadataSnapshot metadata)
     throws IOException {
   for (String file : store.directory().listAll()) {
     if (!IndexWriter.WRITE_LOCK_NAME.equals(file)
         && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file)
         && !Store.isChecksum(file)) {
       assertTrue(
           file
               + " is not in the map: "
               + metadata.asMap().size()
               + " vs. "
               + store.directory().listAll().length,
           metadata.asMap().containsKey(file));
     } else {
       assertFalse(
           file
               + " is not in the map: "
               + metadata.asMap().size()
               + " vs. "
               + store.directory().listAll().length,
           metadata.asMap().containsKey(file));
     }
   }
 }
Example #22
  @Test
  public void testMixedChecksums() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    // this time random codec....
    IndexWriter writer =
        new IndexWriter(
            store.directory(),
            newIndexWriterConfig(random(), new MockAnalyzer(random()))
                .setCodec(actualDefaultCodec()));
    int docs = 1 + random().nextInt(100);

    for (int i = 0; i < docs; i++) {
      Document doc = new Document();
      doc.add(
          new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
      for (int i = 0; i < docs; i++) {
        if (random().nextBoolean()) {
          Document doc = new Document();
          doc.add(
              new TextField(
                  "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          doc.add(
              new TextField(
                  "body",
                  TestUtil.randomRealisticUnicodeString(random()),
                  random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          writer.updateDocument(new Term("id", "" + i), doc);
        }
      }
    }
    if (random().nextBoolean()) {
      DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
      store.getMetadata();
      fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
      // expected
    }
    assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
    writer.commit();
    writer.close();
    Store.LegacyChecksums checksums = new Store.LegacyChecksums();
    metadata = store.getMetadata();
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetaData meta : metadata) {
      try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
        if (meta.checksum() == null) {
          String checksum = null;
          try {
            CodecUtil.retrieveChecksum(input);
            fail("expected a corrupt index - posting format has not checksums");
          } catch (CorruptIndexException
              | IndexFormatTooOldException
              | IndexFormatTooNewException ex) {
            try (ChecksumIndexInput checksumIndexInput =
                store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
              checksumIndexInput.seek(meta.length());
              checksum = Store.digestToString(checksumIndexInput.getChecksum());
            }
            // fine - it's a postings format without checksums
            checksums.add(new StoreFileMetaData(meta.name(), meta.length(), checksum, null));
          }
        } else {
          String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
          assertThat(
              "File: " + meta.name() + " has a different checksum",
              meta.checksum(),
              equalTo(checksum));
          assertThat(meta.hasLegacyChecksum(), equalTo(false));
          assertThat(meta.writtenBy(), equalTo(Version.LATEST));
        }
      }
    }
    assertConsistent(store, metadata);
    checksums.write(store);
    metadata = store.getMetadata();
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetaData meta : metadata) {
      assertThat(
          "file: " + meta.name() + " has a null checksum", meta.checksum(), not(nullValue()));
      if (meta.hasLegacyChecksum()) {
        try (ChecksumIndexInput checksumIndexInput =
            store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
          checksumIndexInput.seek(meta.length());
          assertThat(
              meta.checksum(), equalTo(Store.digestToString(checksumIndexInput.getChecksum())));
        }
      } else {
        try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
          String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
          assertThat(
              "File: " + meta.name() + " has a different checksum",
              meta.checksum(),
              equalTo(checksum));
          assertThat(meta.hasLegacyChecksum(), equalTo(false));
          assertThat(meta.writtenBy(), equalTo(Version.LATEST));
        }
      }
    }
    assertConsistent(store, metadata);
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
  }
Example #23
  @Test
  public void testRenameFile() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    {
      IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
      int iters = scaledRandomIntBetween(10, 100);
      for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      }
      CodecUtil.writeFooter(output);
      output.close();
    }
    store.renameFile("foo.bar", "bar.foo");
    assertThat(store.directory().listAll().length, is(1));
    final long lastChecksum;
    try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
      lastChecksum = CodecUtil.checksumEntireFile(input);
    }

    try {
      store.directory().openInput("foo.bar", IOContext.DEFAULT);
      fail("file was renamed");
    } catch (FileNotFoundException | NoSuchFileException ex) {
      // expected
    }
    {
      IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
      int iters = scaledRandomIntBetween(10, 100);
      for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      }
      CodecUtil.writeFooter(output);
      output.close();
    }
    DistributorDirectory distributorDirectory =
        DirectoryUtils.getLeaf(store.directory(), DistributorDirectory.class);
    if (distributorDirectory != null
        && distributorDirectory.getDirectory("foo.bar")
            != distributorDirectory.getDirectory("bar.foo")) {
      try {
        store.renameFile("foo.bar", "bar.foo");
        fail("target file already exists in a different directory");
      } catch (IOException ex) {
        // expected
      }

      try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
        assertThat(lastChecksum, equalTo(CodecUtil.checksumEntireFile(input)));
      }
      assertThat(store.directory().listAll().length, is(2));
      assertDeleteContent(store, directoryService);
      IOUtils.close(store);
    } else {
      store.renameFile("foo.bar", "bar.foo");
      assertThat(store.directory().listAll().length, is(1));
      assertDeleteContent(store, directoryService);
      IOUtils.close(store);
    }
  }
Example #24
  @Test
  public void testRefCount() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
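    // Store is reference counted: the constructor takes the initial reference, close()
    // releases it, and the underlying directory is only closed when the count reaches zero.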
    int incs = randomIntBetween(1, 100);
    for (int i = 0; i < incs; i++) {
      if (randomBoolean()) {
        store.incRef();
      } else {
        assertTrue(store.tryIncRef());
      }
      store.ensureOpen();
    }

    for (int i = 0; i < incs; i++) {
      store.decRef();
      store.ensureOpen();
    }

    store.incRef();
    final AtomicBoolean called = new AtomicBoolean(false);
    store.close();
    for (int i = 0; i < incs; i++) {
      if (randomBoolean()) {
        store.incRef();
      } else {
        assertTrue(store.tryIncRef());
      }
      store.ensureOpen();
    }

    for (int i = 0; i < incs; i++) {
      store.decRef();
      store.ensureOpen();
    }

    store.decRef();
    assertThat(store.refCount(), Matchers.equalTo(0));
    assertFalse(store.tryIncRef());
    try {
      store.incRef();
      fail(" expected exception");
    } catch (AlreadyClosedException ex) {

    }
    try {
      store.ensureOpen();
      fail(" expected exception");
    } catch (AlreadyClosedException ex) {

    }
  }
Example #25
  public void testCheckIntegrity() throws IOException {
    Directory dir = newDirectory();
    long luceneFileLength = 0;

    try (IndexOutput output = dir.createOutput("lucene_checksum.bin", IOContext.DEFAULT)) {
      int iters = scaledRandomIntBetween(10, 100);
      for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        luceneFileLength += bytesRef.length;
      }
      CodecUtil.writeFooter(output);
      luceneFileLength += CodecUtil.footerLength();
    }

    final Adler32 adler32 = new Adler32();
    long legacyFileLength = 0;
    try (IndexOutput output = dir.createOutput("legacy.bin", IOContext.DEFAULT)) {
      int iters = scaledRandomIntBetween(10, 100);
      for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        adler32.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        legacyFileLength += bytesRef.length;
      }
    }
    final long luceneChecksum;
    final long adler32LegacyChecksum = adler32.getValue();
    try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
      assertEquals(luceneFileLength, indexInput.length());
      luceneChecksum = CodecUtil.retrieveChecksum(indexInput);
    }

    { // positive check
      StoreFileMetaData lucene =
          new StoreFileMetaData(
              "lucene_checksum.bin",
              luceneFileLength,
              Store.digestToString(luceneChecksum),
              Version.LUCENE_4_8_0);
      StoreFileMetaData legacy =
          new StoreFileMetaData(
              "legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
      assertTrue(legacy.hasLegacyChecksum());
      assertFalse(lucene.hasLegacyChecksum());
      assertTrue(Store.checkIntegrityNoException(lucene, dir));
      assertTrue(Store.checkIntegrityNoException(legacy, dir));
    }

    { // negative check - wrong checksum
      StoreFileMetaData lucene =
          new StoreFileMetaData(
              "lucene_checksum.bin",
              luceneFileLength,
              Store.digestToString(luceneChecksum + 1),
              Version.LUCENE_4_8_0);
      StoreFileMetaData legacy =
          new StoreFileMetaData(
              "legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum + 1));
      assertTrue(legacy.hasLegacyChecksum());
      assertFalse(lucene.hasLegacyChecksum());
      assertFalse(Store.checkIntegrityNoException(lucene, dir));
      assertFalse(Store.checkIntegrityNoException(legacy, dir));
    }

    { // negative check - wrong length
      StoreFileMetaData lucene =
          new StoreFileMetaData(
              "lucene_checksum.bin",
              luceneFileLength + 1,
              Store.digestToString(luceneChecksum),
              Version.LUCENE_4_8_0);
      StoreFileMetaData legacy =
          new StoreFileMetaData(
              "legacy.bin", legacyFileLength + 1, Store.digestToString(adler32LegacyChecksum));
      assertTrue(legacy.hasLegacyChecksum());
      assertFalse(lucene.hasLegacyChecksum());
      assertFalse(Store.checkIntegrityNoException(lucene, dir));
      assertFalse(Store.checkIntegrityNoException(legacy, dir));
    }

    { // negative check - wrong file
      StoreFileMetaData lucene =
          new StoreFileMetaData(
              "legacy.bin",
              luceneFileLength,
              Store.digestToString(luceneChecksum),
              Version.LUCENE_4_8_0);
      StoreFileMetaData legacy =
          new StoreFileMetaData(
              "lucene_checksum.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
      assertTrue(legacy.hasLegacyChecksum());
      assertFalse(lucene.hasLegacyChecksum());
      assertFalse(Store.checkIntegrityNoException(lucene, dir));
      assertFalse(Store.checkIntegrityNoException(legacy, dir));
    }
    dir.close();
  }
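The "legacy" checksums exercised above are plain Adler32 digests over the raw file bytes, while files written by Lucene 4.8+ carry a CRC32 checksum in their codec footer (read back via CodecUtil.retrieveChecksum). A self-contained sketch of the legacy digest; the helper class is ours, and the base-36 string encoding is an assumption mirroring Store.digestToString:

  import java.nio.charset.StandardCharsets;
  import java.util.zip.Adler32;

  final class LegacyChecksumSketch {
    // Adler32 over the whole file content, rendered in the largest radix Long supports
    // (assumed to match what Store.digestToString produces).
    static String adler32Digest(byte[] bytes) {
      Adler32 adler = new Adler32();
      adler.update(bytes, 0, bytes.length);
      return Long.toString(adler.getValue(), Character.MAX_RADIX);
    }

    public static void main(String[] args) {
      byte[] data = "some file content".getBytes(StandardCharsets.UTF_8);
      System.out.println("legacy checksum: " + adler32Digest(data));
    }
  }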
Example #26
  @Test
  public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(
          new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      docs.add(doc);
    }
    long seed = random().nextLong();
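    // Build two indices from the same docs with the same random seed: they are logically
    // identical, but per-file ids and timestamps differ, which is what recoveryDiff detects.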
    Store.MetadataSnapshot first;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      Store store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      first = store.getMetadata();
      assertDeleteContent(store, directoryService);
      store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
      Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      second = store.getMetadata();
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
      assertThat(second.get(md.name()), notNullValue());
      // si files are different - containing timestamps etc
      assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    assertThat(
        diff.identical.size(),
        equalTo(0)); // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // lets add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc =
        new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
      if (md.name().endsWith(".liv")) {
        delFile = md;
        break;
      }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
      assertThat(
          afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
      // an entire segment must be missing (single doc segment got dropped)
      assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(
        true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
      assertThat(
          newCommitDiff.identical.size(),
          equalTo(
              newCommitMetaData.size()
                  - 5)); // segments_N, del file, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
      assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
      assertThat(
          newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
    } else {
      assertThat(
          newCommitDiff.identical.size(),
          equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.different.size(), equalTo(0));
      assertThat(
          newCommitDiff.missing.size(),
          equalTo(
              4)); // an entire segment must be missing (single doc segment got dropped)  plus the
                   // commit is different
    }

    store.deleteContent();
    IOUtils.close(store);
  }
  private void deleteShard(
      int shardId, boolean delete, boolean snapshotGateway, boolean deleteGateway)
      throws ElasticSearchException {
    Injector shardInjector;
    IndexShard indexShard;
    synchronized (this) {
      Map<Integer, Injector> tmpShardInjectors = newHashMap(shardsInjectors);
      shardInjector = tmpShardInjectors.remove(shardId);
      if (shardInjector == null) {
        if (!delete) {
          return;
        }
        throw new IndexShardMissingException(new ShardId(index, shardId));
      }
      shardsInjectors = ImmutableMap.copyOf(tmpShardInjectors);
      if (delete) {
        logger.debug("deleting shard_id [{}]", shardId);
      }

      Map<Integer, IndexShard> tmpShardsMap = newHashMap(shards);
      indexShard = tmpShardsMap.remove(shardId);
      shards = ImmutableMap.copyOf(tmpShardsMap);
    }

    ShardId sId = new ShardId(index, shardId);

    indicesLifecycle.beforeIndexShardClosed(sId, indexShard, delete);

    for (Class<? extends CloseableIndexComponent> closeable : pluginsService.shardServices()) {
      try {
        shardInjector.getInstance(closeable).close(delete);
      } catch (Exception e) {
        logger.debug("failed to clean plugin shard service [{}]", e, closeable);
      }
    }

    try {
      // now we can close the translog service, we need to close it before we close the shard
      shardInjector.getInstance(TranslogService.class).close();
    } catch (Exception e) {
      // ignore
    }

    // close shard actions
    if (indexShard != null) {
      shardInjector.getInstance(IndexShardManagement.class).close();
    }

    // this logic is tricky, we want to close the engine so we rollback the changes done to it
    // and close the shard so no operations are allowed to it
    if (indexShard != null) {
      indexShard.close();
    }
    try {
      shardInjector.getInstance(Engine.class).close();
    } catch (Exception e) {
      // ignore
    }

    try {
      // now, we can snapshot to the gateway, it will be only the translog
      if (snapshotGateway) {
        shardInjector.getInstance(IndexShardGatewayService.class).snapshotOnClose();
      }
    } catch (Exception e) {
      // ignore
    }
    try {
      shardInjector.getInstance(IndexShardGatewayService.class).close(deleteGateway);
    } catch (Exception e) {
      // ignore
    }
    try {
      // now we can close the translog
      shardInjector.getInstance(Translog.class).close(delete);
    } catch (Exception e) {
      // ignore
    }

    // call this before we close the store, so we can release resources for it
    indicesLifecycle.afterIndexShardClosed(sId, delete);

    // if we delete or have no gateway or the store is not persistent, clean the store...
    Store store = shardInjector.getInstance(Store.class);
    if (delete || indexGateway.type().equals(NoneGateway.TYPE) || !indexStore.persistent()) {
      try {
        store.fullDelete();
      } catch (IOException e) {
        logger.warn("failed to clean store on shard deletion", e);
      }
    }
    // and close it
    try {
      store.close();
    } catch (IOException e) {
      logger.warn("failed to close store on shard deletion", e);
    }

    Injectors.close(injector);

    // delete the shard location if needed
    if (delete || indexGateway.type().equals(NoneGateway.TYPE)) {
      FileSystemUtils.deleteRecursively(nodeEnv.shardLocation(sId));
    }
  }
  public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
    Settings settings =
        Settings.builder()
            .put("indices.recovery.concurrent_streams", 1)
            .put("indices.recovery.concurrent_small_file_streams", 1)
            .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    StartRecoveryRequest request =
        new StartRecoveryRequest(
            shardId,
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            null,
            RecoveryState.Type.STORE,
            randomLong());
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    RecoverySourceHandler handler =
        new RecoverySourceHandler(null, request, recoverySettings, null, logger) {
          @Override
          protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
          }
        };
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
      Document document = new Document();
      document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
      document.add(
          newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
      writer.addDocument(document);
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot metadata = store.getMetadata();
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
      metas.add(md);
    }

    CorruptionUtils.corruptFile(
        getRandom(),
        FileSystemUtils.files(
            tempDir,
            (p) ->
                (p.getFileName().toString().equals("write.lock")
                        || p.getFileName().toString().startsWith("extra"))
                    == false));
    Store targetStore = newStore(createTempDir(), false);
    try {
      handler.sendFiles(
          store,
          metas.toArray(new StoreFileMetaData[0]),
          (md) -> {
            try {
              return new IndexOutputOutputStream(
                  targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) {
                @Override
                public void close() throws IOException {
                  super.close();
                  store
                      .directory()
                      .sync(
                          Collections.singleton(md.name())); // sync otherwise MDW will mess with it
                }
              };
            } catch (IOException e) {
              throw new RuntimeException(e);
            }
          });
      fail("corrupted index");
    } catch (IOException ex) {
      assertNotNull(ExceptionsHelper.unwrapCorruption(ex));
    }
    assertTrue(failedEngine.get());
    IOUtils.close(store, targetStore);
  }
  public void testHandleExceptionOnSendSendFiles() throws Throwable {
    Settings settings =
        Settings.builder()
            .put("indices.recovery.concurrent_streams", 1)
            .put("indices.recovery.concurrent_small_file_streams", 1)
            .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    StartRecoveryRequest request =
        new StartRecoveryRequest(
            shardId,
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            null,
            RecoveryState.Type.STORE,
            randomLong());
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    RecoverySourceHandler handler =
        new RecoverySourceHandler(null, request, recoverySettings, null, logger) {
          @Override
          protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
          }
        };
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
      Document document = new Document();
      document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
      document.add(
          newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
      writer.addDocument(document);
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot metadata = store.getMetadata();
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
      metas.add(md);
    }
    final boolean throwCorruptedIndexException = randomBoolean();
    Store targetStore = newStore(createTempDir(), false);
    try {
      handler.sendFiles(
          store,
          metas.toArray(new StoreFileMetaData[0]),
          (md) -> {
            if (throwCorruptedIndexException) {
              throw new RuntimeException(new CorruptIndexException("foo", "bar"));
            } else {
              throw new RuntimeException("boom");
            }
          });
      fail("exception index");
    } catch (RuntimeException ex) {
      assertNull(ExceptionsHelper.unwrapCorruption(ex));
      if (throwCorruptedIndexException) {
        assertEquals(
            "[File corruption occurred on recovery but checksums are ok]", ex.getMessage());
      } else {
        assertEquals(ex.getMessage(), "boom");
      }
    } catch (CorruptIndexException ex) {
      fail("not expected here");
    }
    assertFalse(failedEngine.get());
    IOUtils.close(store, targetStore);
  }
 public void testSendFiles() throws Throwable {
   Settings settings =
       Settings.builder()
           .put("indices.recovery.concurrent_streams", 1)
           .put("indices.recovery.concurrent_small_file_streams", 1)
           .build();
   final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
   StartRecoveryRequest request =
       new StartRecoveryRequest(
           shardId,
           new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
           new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
           null,
           RecoveryState.Type.STORE,
           randomLong());
   Store store = newStore(createTempDir());
   RecoverySourceHandler handler =
       new RecoverySourceHandler(null, request, recoverySettings, null, logger);
   Directory dir = store.directory();
   RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
   int numDocs = randomIntBetween(10, 100);
   for (int i = 0; i < numDocs; i++) {
     Document document = new Document();
     document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
     document.add(
         newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
     writer.addDocument(document);
   }
   writer.commit();
   Store.MetadataSnapshot metadata = store.getMetadata();
   List<StoreFileMetaData> metas = new ArrayList<>();
   for (StoreFileMetaData md : metadata) {
     metas.add(md);
   }
   Store targetStore = newStore(createTempDir());
   handler.sendFiles(
       store,
       metas.toArray(new StoreFileMetaData[0]),
       (md) -> {
         try {
           return new IndexOutputOutputStream(
               targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) {
             @Override
             public void close() throws IOException {
               super.close();
               store
                   .directory()
                   .sync(Collections.singleton(md.name())); // sync otherwise MDW will mess with it
             }
           };
         } catch (IOException e) {
           throw new RuntimeException(e);
         }
       });
   Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata();
   Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
   assertEquals(metas.size(), recoveryDiff.identical.size());
   assertEquals(0, recoveryDiff.different.size());
   assertEquals(0, recoveryDiff.missing.size());
   IndexReader reader = DirectoryReader.open(targetStore.directory());
   assertEquals(numDocs, reader.maxDoc());
   IOUtils.close(reader, writer, store, targetStore);
 }