Example 1
 private IndexWriter createWriter() throws IOException {
   IndexWriter indexWriter = null;
   try {
     // release locks when started
     if (IndexWriter.isLocked(store.directory())) {
       logger.warn("shard is locked, releasing lock");
       IndexWriter.unlock(store.directory());
     }
     boolean create = !IndexReader.indexExists(store.directory());
     indexWriter =
         new IndexWriter(
             store.directory(),
             analysisService.defaultIndexAnalyzer(),
             create,
             deletionPolicy,
             IndexWriter.MaxFieldLength.UNLIMITED);
     indexWriter.setMergeScheduler(mergeScheduler.newMergeScheduler());
     indexWriter.setMergePolicy(mergePolicyProvider.newMergePolicy(indexWriter));
     indexWriter.setSimilarity(similarityService.defaultIndexSimilarity());
     indexWriter.setRAMBufferSizeMB(indexingBufferSize.mbFrac());
     indexWriter.setTermIndexInterval(termIndexInterval);
   } catch (IOException e) {
     safeClose(indexWriter);
     throw e;
   }
   return indexWriter;
 }
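
The catch block above calls a safeClose helper that is not shown here; a minimal sketch, assuming it only needs to close the writer quietly so the original IOException keeps propagating:

 private static void safeClose(IndexWriter writer) {
   if (writer != null) {
     try {
       writer.close();
     } catch (Exception e) {
       // swallow secondary failures: the caller is already rethrowing the primary one
     }
   }
 }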
Example 2
 @Override
 public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     final Store store = recoveryStatus.store();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
     final RecoveryState.Index indexState = recoveryStatus.state().getIndex();
     if (request.sourceThrottleTimeInNanos() != RecoveryState.Index.UNKNOWN) {
       indexState.addSourceThrottling(request.sourceThrottleTimeInNanos());
     }
     IndexOutput indexOutput;
     if (request.position() == 0) {
       indexOutput =
           recoveryStatus.openAndPutIndexOutput(request.name(), request.metadata(), store);
     } else {
       indexOutput = recoveryStatus.getOpenIndexOutput(request.name());
     }
     BytesReference content = request.content();
     if (!content.hasArray()) {
       content = content.toBytesArray();
     }
     RateLimiter rl = recoverySettings.rateLimiter();
     if (rl != null) {
       long bytes = bytesSinceLastPause.addAndGet(content.length());
       if (bytes > rl.getMinPauseCheckBytes()) {
         // Time to pause
         bytesSinceLastPause.addAndGet(-bytes);
         long throttleTimeInNanos = rl.pause(bytes);
         indexState.addTargetThrottling(throttleTimeInNanos);
         recoveryStatus.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos);
       }
     }
     indexOutput.writeBytes(content.array(), content.arrayOffset(), content.length());
     indexState.addRecoveredBytesToFile(request.name(), content.length());
     if (indexOutput.getFilePointer() >= request.length() || request.lastChunk()) {
       try {
         Store.verify(indexOutput);
       } finally {
         // we are done
         indexOutput.close();
       }
       // write the checksum
       recoveryStatus.legacyChecksums().add(request.metadata());
       final String temporaryFileName = recoveryStatus.getTempNameForFile(request.name());
       assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
       store.directory().sync(Collections.singleton(temporaryFileName));
       IndexOutput remove = recoveryStatus.removeOpenIndexOutputs(request.name());
      assert remove == null || remove == indexOutput; // remove may be null if the recovery already finished
     }
   }
   channel.sendResponse(TransportResponse.Empty.INSTANCE);
 }
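
The rate-limiting block above charges each chunk to a shared counter and pauses only once enough bytes have accumulated, then credits the pause back to the counter. A stripped-down sketch of that accounting pattern (field and method names here are illustrative, not from the original):

 private final AtomicLong bytesSinceLastPause = new AtomicLong();

 long maybeThrottle(RateLimiter rateLimiter, int chunkLength) throws IOException {
   long bytes = bytesSinceLastPause.addAndGet(chunkLength);
   if (bytes > rateLimiter.getMinPauseCheckBytes()) {
     bytesSinceLastPause.addAndGet(-bytes); // reset the window we just accounted for
     return rateLimiter.pause(bytes); // returns the nanoseconds actually slept
   }
   return 0;
 }

Pausing only after getMinPauseCheckBytes keeps the limiter from sleeping for tiny intervals on every small chunk.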
Example 3
 public void assertDeleteContent(Store store, DirectoryService service) throws IOException {
   store.deleteContent();
   assertThat(
       Arrays.toString(store.directory().listAll()),
       store.directory().listAll().length,
       equalTo(0));
   assertThat(store.stats().sizeInBytes(), equalTo(0L));
   for (Directory dir : service.build()) {
     assertThat(dir.listAll().length, equalTo(0));
   }
 }
Example 4
  @Test
  public void testCleanUpWithLegacyChecksums() throws IOException {
    Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
    metaDataMap.put(
        "segments_1",
        new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[] {1})));
    metaDataMap.put(
        "_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
    Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(metaDataMap);

    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    for (String file : metaDataMap.keySet()) {
      try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        CodecUtil.writeFooter(output);
      }
    }

    store.verifyAfterCleanup(snapshot, snapshot);
    store.deleteContent();
    IOUtils.close(store);
  }
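
The loop above finishes every file with CodecUtil.writeFooter so that it can later be verified. A minimal standalone sketch of that write-then-verify round trip (the helper and file name are illustrative, not from the original):

  static void writeAndVerify(Directory dir) throws IOException {
    try (IndexOutput output = dir.createOutput("demo.bin", IOContext.DEFAULT)) {
      byte[] payload = "some payload".getBytes(StandardCharsets.UTF_8);
      output.writeBytes(payload, 0, payload.length);
      CodecUtil.writeFooter(output); // appends footer magic, algorithm id, and checksum
    }
    try (IndexInput input = dir.openInput("demo.bin", IOContext.DEFAULT)) {
      CodecUtil.checksumEntireFile(input); // throws CorruptIndexException on mismatch
    }
  }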
Example 5
 private long newTransactionLogId() throws IOException {
   try {
     return IndexWriters.rollbackSegmentInfos(indexWriter).getVersion();
   } catch (Exception e) {
     return IndexReader.getCurrentVersion(store.directory());
   }
 }
Example 6
 @Override
 public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel)
     throws Exception {
   try (RecoveriesCollection.StatusRef statusRef =
       onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
     final RecoveryStatus recoveryStatus = statusRef.status();
     recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
      // first, we move the files that were created with the recovery id suffix to their
      // actual names; it's OK if we have a corrupted index here, since we have replicas
      // to recover from in case of a full cluster shutdown just when this code executes...
      recoveryStatus
          .indexShard()
          .deleteShardState(); // delete the shard state first: even if the rename fails,
                               // the shard might already be invalid
     recoveryStatus.renameAllTempFiles();
     final Store store = recoveryStatus.store();
     // now write checksums
     recoveryStatus.legacyChecksums().write(store);
     Store.MetadataSnapshot sourceMetaData = request.sourceMetaSnapshot();
     try {
       store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
     } catch (CorruptIndexException
         | IndexFormatTooNewException
         | IndexFormatTooOldException ex) {
        // This is a fatal exception at this stage: it means we transferred files from
        // the remote that have not been checksummed and are broken. We have to clean
        // up this shard entirely, remove all files, and bubble the failure up to the
        // source shard, since the index might be broken there as well. The source can
        // handle this and check its content on disk if possible.
       try {
         try {
           store.removeCorruptionMarker();
         } finally {
           Lucene.cleanLuceneIndex(store.directory()); // clean up and delete all files
         }
       } catch (Throwable e) {
         logger.debug("Failed to clean lucene index", e);
         ex.addSuppressed(e);
       }
       RecoveryFailedException rfe =
           new RecoveryFailedException(
               recoveryStatus.state(), "failed to clean after recovery", ex);
       recoveryStatus.fail(rfe, true);
       throw rfe;
     } catch (Exception ex) {
       RecoveryFailedException rfe =
           new RecoveryFailedException(
               recoveryStatus.state(), "failed to clean after recovery", ex);
       recoveryStatus.fail(rfe, true);
       throw rfe;
     }
     channel.sendResponse(TransportResponse.Empty.INSTANCE);
   }
 }
Example 7
 void sendFiles(
     Store store,
     StoreFileMetaData[] files,
     Function<StoreFileMetaData, OutputStream> outputStreamFactory)
     throws Exception {
   store.incRef();
   try {
     ArrayUtil.timSort(
         files, (a, b) -> Long.compare(a.length(), b.length())); // send smallest first
     for (int i = 0; i < files.length; i++) {
       final StoreFileMetaData md = files[i];
       try (final IndexInput indexInput =
           store.directory().openInput(md.name(), IOContext.READONCE)) {
          // it's fine that only the indexInput lives in the try-with-resources block:
          // the copy method handles exceptions during close correctly and doesn't
          // hide the original exception.
         Streams.copy(
             new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md));
       } catch (Exception e) {
         final IOException corruptIndexException;
         if ((corruptIndexException = ExceptionsHelper.unwrapCorruption(e)) != null) {
           if (store.checkIntegrityNoException(md)
               == false) { // we are corrupted on the primary -- fail!
             logger.warn("{} Corrupted file detected {} checksum mismatch", shardId, md);
             failEngine(corruptIndexException);
             throw corruptIndexException;
           } else { // corruption has happened on the way to replica
             RemoteTransportException exception =
                 new RemoteTransportException(
                     "File corruption occurred on recovery but " + "checksums are ok", null);
             exception.addSuppressed(e);
             logger.warn(
                 (org.apache.logging.log4j.util.Supplier<?>)
                     () ->
                         new ParameterizedMessage(
                             "{} Remote file corruption on node {}, recovering {}. local checksum OK",
                             shardId,
                             request.targetNode(),
                             md),
                 corruptIndexException);
             throw exception;
           }
         } else {
           throw e;
         }
       }
     }
   } finally {
     store.decRef();
   }
 }
Example 8
 public static void assertConsistent(Store store, Store.MetadataSnapshot metadata)
     throws IOException {
   for (String file : store.directory().listAll()) {
     if (!IndexWriter.WRITE_LOCK_NAME.equals(file)
         && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file)
         && !Store.isChecksum(file)) {
       assertTrue(
           file
               + " is not in the map: "
               + metadata.asMap().size()
               + " vs. "
               + store.directory().listAll().length,
           metadata.asMap().containsKey(file));
     } else {
        assertFalse(
            file
                + " should not be in the map: "
                + metadata.asMap().size()
                + " vs. "
                + store.directory().listAll().length,
            metadata.asMap().containsKey(file));
     }
   }
 }
Example 9
 public static void checkIndex(ESLogger logger, Store store, ShardId shardId) {
   if (store.tryIncRef()) {
     logger.info("start check index");
     try {
       Directory dir = store.directory();
       if (!Lucene.indexExists(dir)) {
         return;
       }
       if (IndexWriter.isLocked(dir)) {
         ESTestCase.checkIndexFailed = true;
         throw new IllegalStateException("IndexWriter is still open on shard " + shardId);
       }
       try (CheckIndex checkIndex = new CheckIndex(dir)) {
         BytesStreamOutput os = new BytesStreamOutput();
         PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
          checkIndex.setInfoStream(out);
          CheckIndex.Status status = checkIndex.checkIndex();
          out.flush();
         if (!status.clean) {
           ESTestCase.checkIndexFailed = true;
           logger.warn(
               "check index [failure] index files={}\n{}",
               Arrays.toString(dir.listAll()),
               new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
           throw new IOException("index check failure");
         } else {
           if (logger.isDebugEnabled()) {
             logger.debug(
                 "check index [success]\n{}",
                 new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
           }
         }
       }
     } catch (Exception e) {
       logger.warn("failed to check index", e);
     } finally {
       logger.info("end check index");
       store.decRef();
     }
   }
 }
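
For reference, the same CheckIndex API can be driven outside the Store wrapper; a minimal sketch (the helper is illustrative, and CheckIndex being Closeable assumes Lucene 5 or later):

  static void checkIndex(Path indexPath) throws IOException {
    try (Directory dir = FSDirectory.open(indexPath);
        CheckIndex checkIndex = new CheckIndex(dir)) {
      CheckIndex.Status status = checkIndex.checkIndex();
      if (status.clean == false) {
        throw new IOException("index check failure");
      }
    }
  }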
Example 10
  @Test
  public void testCleanupFromSnapshot() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    // this time random codec....
    IndexWriterConfig indexWriterConfig =
        newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(actualDefaultCodec());
    // we keep all commits, which allows us to clean based on multiple snapshots
    indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
    int docs = 1 + random().nextInt(100);
    int numCommits = 0;
    for (int i = 0; i < docs; i++) {
      if (i > 0 && randomIntBetween(0, 10) == 0) {
        writer.commit();
        numCommits++;
      }
      Document doc = new Document();
      doc.add(
          new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      writer.addDocument(doc);
    }
    if (numCommits < 1) {
      writer.commit();
      Document doc = new Document();
      doc.add(
          new TextField(
              "id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      writer.addDocument(doc);
    }

    Store.MetadataSnapshot firstMeta = store.getMetadata();

    if (random().nextBoolean()) {
      for (int i = 0; i < docs; i++) {
        if (random().nextBoolean()) {
          Document doc = new Document();
          doc.add(
              new TextField(
                  "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          doc.add(
              new TextField(
                  "body",
                  TestUtil.randomRealisticUnicodeString(random()),
                  random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          writer.updateDocument(new Term("id", "" + i), doc);
        }
      }
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot secondMeta = store.getMetadata();

    Store.LegacyChecksums checksums = new Store.LegacyChecksums();
    Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
    for (String file : store.directory().listAll()) {
      if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
        continue;
      }
      BytesRef hash = new BytesRef();
      if (file.startsWith("segments")) {
        hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
      }
      StoreFileMetaData storeFileMetaData =
          new StoreFileMetaData(
              file, store.directory().fileLength(file), file + "checksum", null, hash);
      legacyMeta.put(file, storeFileMetaData);
      checksums.add(storeFileMetaData);
    }
    checksums.write(
        store); // write one checksum file here - we expect it to survive all the cleanups

    if (randomBoolean()) {
      store.cleanupAndVerify("test", firstMeta);
      String[] strings = store.directory().listAll();
      int numChecksums = 0;
      int numNotFound = 0;
      for (String file : strings) {
        assertTrue(firstMeta.contains(file) || Store.isChecksum(file));
        if (Store.isChecksum(file)) {
          numChecksums++;
        } else if (secondMeta.contains(file) == false) {
          numNotFound++;
        }
      }
      assertTrue(
          "at least one file must not be in here since we have two commits", numNotFound > 0);
      assertEquals(
          "we wrote one checksum but it's gone now - checksums are supposed to be kept",
          1,
          numChecksums);
    } else {
      store.cleanupAndVerify("test", secondMeta);
      String[] strings = store.directory().listAll();
      int numChecksums = 0;
      int numNotFound = 0;
      for (String file : strings) {
        assertTrue(secondMeta.contains(file) || Store.isChecksum(file));
        if (Store.isChecksum(file)) {
          numChecksums++;
        } else if (firstMeta.contains(file) == false) {
          numNotFound++;
        }
      }
      assertTrue(
          "at least one file must not be in here since we have two commits", numNotFound > 0);
      assertEquals(
          "we wrote one checksum but it's gone now - checksums are supposed to be kept",
          1,
          numChecksums);
    }

    store.deleteContent();
    IOUtils.close(store);
  }
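
The cleanup test above relies on NoDeletionPolicy so that both commits, and therefore both metadata snapshots, stay on disk at once. A minimal sketch of opening a writer that retains every commit point (the helper, analyzer, and path are illustrative):

  static void writeTwoRetainedCommits(Path path) throws IOException {
    IndexWriterConfig iwc =
        new IndexWriterConfig(new StandardAnalyzer())
            .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); // keep every commit point
    try (IndexWriter writer = new IndexWriter(FSDirectory.open(path), iwc)) {
      writer.addDocument(new Document());
      writer.commit(); // the first commit survives...
      writer.addDocument(new Document());
      writer.commit(); // ...alongside the second
    }
  }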
Example 11
 public void testSendFiles() throws Throwable {
   Settings settings =
       Settings.builder()
           .put("indices.recovery.concurrent_streams", 1)
           .put("indices.recovery.concurrent_small_file_streams", 1)
           .build();
   final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
   StartRecoveryRequest request =
       new StartRecoveryRequest(
           shardId,
           new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
           new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
           null,
           RecoveryState.Type.STORE,
           randomLong());
   Store store = newStore(createTempDir());
   RecoverySourceHandler handler =
       new RecoverySourceHandler(null, request, recoverySettings, null, logger);
   Directory dir = store.directory();
   RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
   int numDocs = randomIntBetween(10, 100);
   for (int i = 0; i < numDocs; i++) {
     Document document = new Document();
     document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
     document.add(
         newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
     writer.addDocument(document);
   }
   writer.commit();
   Store.MetadataSnapshot metadata = store.getMetadata();
   List<StoreFileMetaData> metas = new ArrayList<>();
   for (StoreFileMetaData md : metadata) {
     metas.add(md);
   }
   Store targetStore = newStore(createTempDir());
   handler.sendFiles(
       store,
       metas.toArray(new StoreFileMetaData[0]),
       (md) -> {
         try {
           return new IndexOutputOutputStream(
               targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) {
             @Override
             public void close() throws IOException {
               super.close();
                targetStore
                    .directory()
                    .sync(Collections.singleton(md.name())); // sync, otherwise MockDirectoryWrapper will mess with it
             }
           };
         } catch (IOException e) {
           throw new RuntimeException(e);
         }
       });
   Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata();
   Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata);
   assertEquals(metas.size(), recoveryDiff.identical.size());
   assertEquals(0, recoveryDiff.different.size());
   assertEquals(0, recoveryDiff.missing.size());
   IndexReader reader = DirectoryReader.open(targetStore.directory());
   assertEquals(numDocs, reader.maxDoc());
   IOUtils.close(reader, writer, store, targetStore);
 }
Example 12
  public void testHandleExceptionOnSendSendFiles() throws Throwable {
    Settings settings =
        Settings.builder()
            .put("indices.recovery.concurrent_streams", 1)
            .put("indices.recovery.concurrent_small_file_streams", 1)
            .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    StartRecoveryRequest request =
        new StartRecoveryRequest(
            shardId,
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            null,
            RecoveryState.Type.STORE,
            randomLong());
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    RecoverySourceHandler handler =
        new RecoverySourceHandler(null, request, recoverySettings, null, logger) {
          @Override
          protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
          }
        };
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
      Document document = new Document();
      document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
      document.add(
          newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
      writer.addDocument(document);
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot metadata = store.getMetadata();
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
      metas.add(md);
    }
    final boolean throwCorruptedIndexException = randomBoolean();
    Store targetStore = newStore(createTempDir(), false);
    try {
      handler.sendFiles(
          store,
          metas.toArray(new StoreFileMetaData[0]),
          (md) -> {
            if (throwCorruptedIndexException) {
              throw new RuntimeException(new CorruptIndexException("foo", "bar"));
            } else {
              throw new RuntimeException("boom");
            }
          });
      fail("exception index");
    } catch (RuntimeException ex) {
      assertNull(ExceptionsHelper.unwrapCorruption(ex));
      if (throwCorruptedIndexException) {
        assertEquals(
            "[File corruption occurred on recovery but checksums are ok]", ex.getMessage());
      } else {
        assertEquals("boom", ex.getMessage());
      }
    } catch (CorruptIndexException ex) {
      fail("not expected here");
    }
    assertFalse(failedEngine.get());
    IOUtils.close(store, targetStore);
  }
Example 13
  public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {
    Settings settings =
        Settings.builder()
            .put("indices.recovery.concurrent_streams", 1)
            .put("indices.recovery.concurrent_small_file_streams", 1)
            .build();
    final RecoverySettings recoverySettings = new RecoverySettings(settings, service);
    StartRecoveryRequest request =
        new StartRecoveryRequest(
            shardId,
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
            null,
            RecoveryState.Type.STORE,
            randomLong());
    Path tempDir = createTempDir();
    Store store = newStore(tempDir, false);
    AtomicBoolean failedEngine = new AtomicBoolean(false);
    RecoverySourceHandler handler =
        new RecoverySourceHandler(null, request, recoverySettings, null, logger) {
          @Override
          protected void failEngine(IOException cause) {
            assertFalse(failedEngine.get());
            failedEngine.set(true);
          }
        };
    Directory dir = store.directory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());
    int numDocs = randomIntBetween(10, 100);
    for (int i = 0; i < numDocs; i++) {
      Document document = new Document();
      document.add(new StringField("id", Integer.toString(i), Field.Store.YES));
      document.add(
          newField("field", randomUnicodeOfCodepointLengthBetween(1, 10), TextField.TYPE_STORED));
      writer.addDocument(document);
    }
    writer.commit();
    writer.close();

    Store.MetadataSnapshot metadata = store.getMetadata();
    List<StoreFileMetaData> metas = new ArrayList<>();
    for (StoreFileMetaData md : metadata) {
      metas.add(md);
    }

    CorruptionUtils.corruptFile(
        getRandom(),
        FileSystemUtils.files(
            tempDir,
            (p) ->
                (p.getFileName().toString().equals("write.lock")
                        || p.getFileName().toString().startsWith("extra"))
                    == false));
    Store targetStore = newStore(createTempDir(), false);
    try {
      handler.sendFiles(
          store,
          metas.toArray(new StoreFileMetaData[0]),
          (md) -> {
            try {
              return new IndexOutputOutputStream(
                  targetStore.createVerifyingOutput(md.name(), md, IOContext.DEFAULT)) {
                @Override
                public void close() throws IOException {
                  super.close();
                   targetStore
                       .directory()
                       .sync(
                           Collections.singleton(md.name())); // sync, otherwise MockDirectoryWrapper will mess with it
                }
              };
            } catch (IOException e) {
              throw new RuntimeException(e);
            }
          });
      fail("corrupted index");
    } catch (IOException ex) {
      assertNotNull(ExceptionsHelper.unwrapCorruption(ex));
    }
    assertTrue(failedEngine.get());
    IOUtils.close(store, targetStore);
  }
Example 14
  @Test
  public void testRecoveryDiff() throws IOException, InterruptedException {
    int numDocs = 2 + random().nextInt(100);
    List<Document> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      doc.add(
          new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      docs.add(doc);
    }
    long seed = random().nextLong();
    Store.MetadataSnapshot first;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      Store store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      first = store.getMetadata();
      assertDeleteContent(store, directoryService);
      store.close();
    }
    long time = new Date().getTime();
    while (time == new Date().getTime()) {
      Thread.sleep(10); // bump the time
    }
    Store.MetadataSnapshot second;
    Store store;
    {
      Random random = new Random(seed);
      IndexWriterConfig iwc =
          new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
      iwc.setMergePolicy(NoMergePolicy.INSTANCE);
      iwc.setUseCompoundFile(random.nextBoolean());
      iwc.setMaxThreadStates(1);
      final ShardId shardId = new ShardId(new Index("index"), 1);
      DirectoryService directoryService = new LuceneManagedDirectoryService(random);
      store =
          new Store(
              shardId,
              ImmutableSettings.EMPTY,
              directoryService,
              randomDistributor(random, directoryService),
              new DummyShardLock(shardId));
      IndexWriter writer = new IndexWriter(store.directory(), iwc);
      final boolean lotsOfSegments = rarely(random);
      for (Document d : docs) {
        writer.addDocument(d);
        if (lotsOfSegments && random.nextBoolean()) {
          writer.commit();
        } else if (rarely(random)) {
          writer.commit();
        }
      }
      writer.commit();
      writer.close();
      second = store.getMetadata();
    }
    Store.RecoveryDiff diff = first.recoveryDiff(second);
    assertThat(first.size(), equalTo(second.size()));
    for (StoreFileMetaData md : first) {
      assertThat(second.get(md.name()), notNullValue());
      // si files are different - containing timestamps etc
      assertThat(second.get(md.name()).isSame(md), equalTo(false));
    }
    assertThat(diff.different.size(), equalTo(first.size()));
    assertThat(
        diff.identical.size(),
        equalTo(0)); // in lucene 5 nothing is identical - we use random ids in file headers
    assertThat(diff.missing, empty());

    // check the self diff
    Store.RecoveryDiff selfDiff = first.recoveryDiff(first);
    assertThat(selfDiff.identical.size(), equalTo(first.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // lets add some deletes
    Random random = new Random(seed);
    IndexWriterConfig iwc =
        new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(random.nextBoolean());
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    IndexWriter writer = new IndexWriter(store.directory(), iwc);
    writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs))));
    writer.commit();
    writer.close();
    Store.MetadataSnapshot metadata = store.getMetadata();
    StoreFileMetaData delFile = null;
    for (StoreFileMetaData md : metadata) {
      if (md.name().endsWith(".liv")) {
        delFile = md;
        break;
      }
    }
    Store.RecoveryDiff afterDeleteDiff = metadata.recoveryDiff(second);
    if (delFile != null) {
      assertThat(
          afterDeleteDiff.identical.size(), equalTo(metadata.size() - 2)); // segments_N + del file
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(2));
    } else {
      // an entire segment must be missing (single doc segment got dropped)
      assertThat(afterDeleteDiff.identical.size(), greaterThan(0));
      assertThat(afterDeleteDiff.different.size(), equalTo(0));
      assertThat(afterDeleteDiff.missing.size(), equalTo(1)); // the commit file is different
    }

    // check the self diff
    selfDiff = metadata.recoveryDiff(metadata);
    assertThat(selfDiff.identical.size(), equalTo(metadata.size()));
    assertThat(selfDiff.different, empty());
    assertThat(selfDiff.missing, empty());

    // add a new commit
    iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(actualDefaultCodec());
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    iwc.setUseCompoundFile(
        true); // force CFS - easier to test here since we know it will add 3 files
    iwc.setMaxThreadStates(1);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND);
    writer = new IndexWriter(store.directory(), iwc);
    writer.addDocument(docs.get(0));
    writer.close();

    Store.MetadataSnapshot newCommitMetaData = store.getMetadata();
    Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
    if (delFile != null) {
      assertThat(
          newCommitDiff.identical.size(),
          equalTo(
              newCommitMetaData.size()
                  - 5)); // segments_N, del file, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
      assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
      assertThat(
          newCommitDiff.missing.size(), equalTo(4)); // segments_N, cfs, cfe, si for the new segment
    } else {
      assertThat(
          newCommitDiff.identical.size(),
          equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
      assertThat(newCommitDiff.different.size(), equalTo(0));
      assertThat(
          newCommitDiff.missing.size(),
          equalTo(4)); // an entire segment is missing (the single-doc segment got dropped),
                       // plus the commit file is different

    store.deleteContent();
    IOUtils.close(store);
  }
Example 15
  private void recoverIndex(CommitPoint commitPoint, ImmutableMap<String, BlobMetaData> blobs)
      throws Exception {
    int numberOfFiles = 0;
    long totalSize = 0;
    int numberOfReusedFiles = 0;
    long reusedTotalSize = 0;

    List<CommitPoint.FileInfo> filesToRecover = Lists.newArrayList();
    for (CommitPoint.FileInfo fileInfo : commitPoint.indexFiles()) {
      String fileName = fileInfo.physicalName();
      StoreFileMetaData md = null;
      try {
        md = store.metaData(fileName);
      } catch (Exception e) {
        // no file
      }
      // we don't compute checksums for segments files, so always recover them
      if (!fileName.startsWith("segments") && md != null && fileInfo.isSame(md)) {
        numberOfFiles++;
        totalSize += md.length();
        numberOfReusedFiles++;
        reusedTotalSize += md.length();
        if (logger.isTraceEnabled()) {
          logger.trace(
              "not_recovering [{}], exists in local store and is same", fileInfo.physicalName());
        }
      } else {
        if (logger.isTraceEnabled()) {
          if (md == null) {
            logger.trace(
                "recovering [{}], does not exist in local store", fileInfo.physicalName());
          } else {
            logger.trace(
                "recovering [{}], exists in local store but is different", fileInfo.physicalName());
          }
        }
        numberOfFiles++;
        totalSize += fileInfo.length();
        filesToRecover.add(fileInfo);
      }
    }

    recoveryStatus.index().files(numberOfFiles, totalSize, numberOfReusedFiles, reusedTotalSize);
    if (filesToRecover.isEmpty()) {
      logger.trace("no files to recover, all exists within the local store");
    }

    if (logger.isTraceEnabled()) {
      logger.trace(
          "recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]",
          numberOfFiles,
          new ByteSizeValue(totalSize),
          numberOfReusedFiles,
          new ByteSizeValue(reusedTotalSize));
    }

    final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
    final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<Throwable>();

    for (final CommitPoint.FileInfo fileToRecover : filesToRecover) {
      recoverFile(fileToRecover, blobs, latch, failures);
    }

    try {
      latch.await();
    } catch (InterruptedException e) {
      throw new IndexShardGatewayRecoveryException(
          shardId, "Interrupted while recovering index", e);
    }

    if (!failures.isEmpty()) {
      throw new IndexShardGatewayRecoveryException(
          shardId, "Failed to recover index", failures.get(0));
    }

    // read the gateway data persisted
    long version = -1;
    try {
      if (IndexReader.indexExists(store.directory())) {
        version = IndexReader.getCurrentVersion(store.directory());
      }
    } catch (IOException e) {
      throw new IndexShardGatewayRecoveryException(
          shardId(), "Failed to fetch index version after copying it over", e);
    }
    recoveryStatus.index().updateVersion(version);

    // now, go over and clean files that are in the store, but were not in the gateway
    try {
      for (String storeFile : store.directory().listAll()) {
        if (!commitPoint.containPhysicalIndexFile(storeFile)) {
          try {
            store.directory().deleteFile(storeFile);
          } catch (Exception e) {
            // ignore
          }
        }
      }
    } catch (Exception e) {
      // ignore
    }
  }
Example 16
  @Test
  public void testRenameFile() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    {
      IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
      int iters = scaledRandomIntBetween(10, 100);
      for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      }
      CodecUtil.writeFooter(output);
      output.close();
    }
    store.renameFile("foo.bar", "bar.foo");
    assertThat(store.directory().listAll().length, is(1));
    final long lastChecksum;
    try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
      lastChecksum = CodecUtil.checksumEntireFile(input);
    }

    try {
      store.directory().openInput("foo.bar", IOContext.DEFAULT);
      fail("file was renamed");
    } catch (FileNotFoundException | NoSuchFileException ex) {
      // expected
    }
    {
      IndexOutput output = store.directory().createOutput("foo.bar", IOContext.DEFAULT);
      int iters = scaledRandomIntBetween(10, 100);
      for (int i = 0; i < iters; i++) {
        BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
        output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      }
      CodecUtil.writeFooter(output);
      output.close();
    }
    DistributorDirectory distributorDirectory =
        DirectoryUtils.getLeaf(store.directory(), DistributorDirectory.class);
    if (distributorDirectory != null
        && distributorDirectory.getDirectory("foo.bar")
            != distributorDirectory.getDirectory("bar.foo")) {
      try {
        store.renameFile("foo.bar", "bar.foo");
        fail("target file already exists in a different directory");
      } catch (IOException ex) {
        // expected
      }

      try (IndexInput input = store.directory().openInput("bar.foo", IOContext.DEFAULT)) {
        assertThat(lastChecksum, equalTo(CodecUtil.checksumEntireFile(input)));
      }
      assertThat(store.directory().listAll().length, is(2));
      assertDeleteContent(store, directoryService);
      IOUtils.close(store);
    } else {
      store.renameFile("foo.bar", "bar.foo");
      assertThat(store.directory().listAll().length, is(1));
      assertDeleteContent(store, directoryService);
      IOUtils.close(store);
    }
  }
Example 17
  @Test
  public void testMixedChecksums() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    // this time random codec....
    IndexWriter writer =
        new IndexWriter(
            store.directory(),
            newIndexWriterConfig(random(), new MockAnalyzer(random()))
                .setCodec(actualDefaultCodec()));
    int docs = 1 + random().nextInt(100);

    for (int i = 0; i < docs; i++) {
      Document doc = new Document();
      doc.add(
          new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
      for (int i = 0; i < docs; i++) {
        if (random().nextBoolean()) {
          Document doc = new Document();
          doc.add(
              new TextField(
                  "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          doc.add(
              new TextField(
                  "body",
                  TestUtil.randomRealisticUnicodeString(random()),
                  random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          writer.updateDocument(new Term("id", "" + i), doc);
        }
      }
    }
    if (random().nextBoolean()) {
      DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
      store.getMetadata();
      fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
      // expected
    }
    assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
    writer.commit();
    writer.close();
    Store.LegacyChecksums checksums = new Store.LegacyChecksums();
    metadata = store.getMetadata();
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetaData meta : metadata) {
      try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
        if (meta.checksum() == null) {
          String checksum = null;
          try {
            CodecUtil.retrieveChecksum(input);
            fail("expected a corrupt index - posting format has not checksums");
          } catch (CorruptIndexException
              | IndexFormatTooOldException
              | IndexFormatTooNewException ex) {
            try (ChecksumIndexInput checksumIndexInput =
                store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
              checksumIndexInput.seek(meta.length());
              checksum = Store.digestToString(checksumIndexInput.getChecksum());
            }
            // fine - it's a postings format without checksums
            checksums.add(new StoreFileMetaData(meta.name(), meta.length(), checksum, null));
          }
        } else {
          String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
          assertThat(
              "File: " + meta.name() + " has a different checksum",
              meta.checksum(),
              equalTo(checksum));
          assertThat(meta.hasLegacyChecksum(), equalTo(false));
          assertThat(meta.writtenBy(), equalTo(Version.LATEST));
        }
      }
    }
    assertConsistent(store, metadata);
    checksums.write(store);
    metadata = store.getMetadata();
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetaData meta : metadata) {
      assertThat(
          "file: " + meta.name() + " has a null checksum", meta.checksum(), not(nullValue()));
      if (meta.hasLegacyChecksum()) {
        try (ChecksumIndexInput checksumIndexInput =
            store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
          checksumIndexInput.seek(meta.length());
          assertThat(
              meta.checksum(), equalTo(Store.digestToString(checksumIndexInput.getChecksum())));
        }
      } else {
        try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
          String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
          assertThat(
              "File: " + meta.name() + " has a different checksum",
              meta.checksum(),
              equalTo(checksum));
          assertThat(meta.hasLegacyChecksum(), equalTo(false));
          assertThat(meta.writtenBy(), equalTo(Version.LATEST));
        }
      }
    }
    assertConsistent(store, metadata);
    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
  }
Example 18
  @Test
  public void testNewChecksums() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    // set default codec - all segments need checksums
    IndexWriter writer =
        new IndexWriter(
            store.directory(),
            newIndexWriterConfig(random(), new MockAnalyzer(random()))
                .setCodec(actualDefaultCodec()));
    int docs = 1 + random().nextInt(100);

    for (int i = 0; i < docs; i++) {
      Document doc = new Document();
      doc.add(
          new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
      for (int i = 0; i < docs; i++) {
        if (random().nextBoolean()) {
          Document doc = new Document();
          doc.add(
              new TextField(
                  "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          doc.add(
              new TextField(
                  "body",
                  TestUtil.randomRealisticUnicodeString(random()),
                  random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          writer.updateDocument(new Term("id", "" + i), doc);
        }
      }
    }
    if (random().nextBoolean()) {
      DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
      store.getMetadata();
      fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
      // expected
    }
    assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
    writer.commit();
    writer.close();
    metadata = store.getMetadata();
    assertThat(metadata.asMap().isEmpty(), is(false));
    for (StoreFileMetaData meta : metadata) {
      try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
        String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
        assertThat(
            "File: " + meta.name() + " has a different checksum",
            meta.checksum(),
            equalTo(checksum));
        assertThat(meta.hasLegacyChecksum(), equalTo(false));
        assertThat(meta.writtenBy(), equalTo(Version.LATEST));
        if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
          assertThat(meta.hash().length, greaterThan(0));
        }
      }
    }
    assertConsistent(store, metadata);

    TestUtil.checkIndex(store.directory());
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
  }
Example 19
  // IF THIS TEST FAILS ON UPGRADE, GO LOOK AT OldSIMockingCodec!
  @Test
  public void testWriteLegacyChecksums() throws IOException {
    final ShardId shardId = new ShardId(new Index("index"), 1);
    DirectoryService directoryService = new LuceneManagedDirectoryService(random());
    Store store =
        new Store(
            shardId,
            ImmutableSettings.EMPTY,
            directoryService,
            randomDistributor(directoryService),
            new DummyShardLock(shardId));
    // set default codec - all segments need checksums
    final boolean usesOldCodec = randomBoolean();
    IndexWriter writer =
        new IndexWriter(
            store.directory(),
            newIndexWriterConfig(random(), new MockAnalyzer(random()))
                .setCodec(usesOldCodec ? new OldSIMockingCodec() : actualDefaultCodec()));
    int docs = 1 + random().nextInt(100);

    for (int i = 0; i < docs; i++) {
      Document doc = new Document();
      doc.add(
          new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new TextField(
              "body",
              TestUtil.randomRealisticUnicodeString(random()),
              random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
      doc.add(
          new SortedDocValuesField(
              "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
      writer.addDocument(doc);
    }
    if (random().nextBoolean()) {
      for (int i = 0; i < docs; i++) {
        if (random().nextBoolean()) {
          Document doc = new Document();
          doc.add(
              new TextField(
                  "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          doc.add(
              new TextField(
                  "body",
                  TestUtil.randomRealisticUnicodeString(random()),
                  random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
          writer.updateDocument(new Term("id", "" + i), doc);
        }
      }
    }
    if (random().nextBoolean()) {
      DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
    }
    Store.MetadataSnapshot metadata;
    // check before we committed
    try {
      store.getMetadata();
      fail("no index present - expected exception");
    } catch (IndexNotFoundException ex) {
      // expected
    }
    assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed

    writer.close();
    Store.LegacyChecksums checksums = new Store.LegacyChecksums();
    Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
    for (String file : store.directory().listAll()) {
      if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
        continue;
      }
      BytesRef hash = new BytesRef();
      if (file.startsWith("segments")) {
        hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
      }
      StoreFileMetaData storeFileMetaData =
          new StoreFileMetaData(
              file, store.directory().fileLength(file), file + "checksum", null, hash);
      legacyMeta.put(file, storeFileMetaData);
      checksums.add(storeFileMetaData);
    }
    checksums.write(store);

    metadata = store.getMetadata();
    Map<String, StoreFileMetaData> stringStoreFileMetaDataMap = metadata.asMap();
    assertThat(legacyMeta.size(), equalTo(stringStoreFileMetaDataMap.size()));
    if (usesOldCodec) {
      for (StoreFileMetaData meta : legacyMeta.values()) {
        assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
        assertEquals(meta.name() + "checksum", meta.checksum());
        assertTrue(
            meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()),
            stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
      }
    } else {
      // even if we wrote legacy checksums, a new codec supplies its own per-file
      // checksums, so the snapshot must not reuse the legacy metadata
      for (StoreFileMetaData meta : legacyMeta.values()) {
        assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
        assertFalse(
            meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()),
            stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
        StoreFileMetaData storeFileMetaData = metadata.get(meta.name());
        try (IndexInput input =
            store.openVerifyingInput(meta.name(), IOContext.DEFAULT, storeFileMetaData)) {
          assertTrue(storeFileMetaData.toString(), input instanceof Store.VerifyingIndexInput);
          input.seek(meta.length());
          Store.verify(input);
        }
      }
    }
    assertDeleteContent(store, directoryService);
    IOUtils.close(store);
  }