/**
 * Tries to lock the given shard ID. A shard lock is required to perform any kind of write
 * operation on a shard's data directory, such as deleting files, creating a new index writer, or
 * recovering from a different shard instance into it. If the shard lock cannot be acquired, a
 * {@link org.apache.lucene.store.LockObtainFailedException} is thrown.
 *
 * @param id the shard ID to lock
 * @param lockTimeoutMS the lock timeout in milliseconds
 * @return the shard lock. Call {@link ShardLock#close()} to release the lock
 * @throws IOException if an IOException occurs
 */
public ShardLock shardLock(final ShardId id, long lockTimeoutMS) throws IOException {
  logger.trace("acquiring node shardlock on [{}], timeout [{}]", id, lockTimeoutMS);
  final InternalShardLock shardLock;
  final boolean acquired;
  synchronized (shardLocks) {
    if (shardLocks.containsKey(id)) {
      shardLock = shardLocks.get(id);
      shardLock.incWaitCount();
      acquired = false;
    } else {
      shardLock = new InternalShardLock(id);
      shardLocks.put(id, shardLock);
      acquired = true;
    }
  }
  if (acquired == false) {
    boolean success = false;
    try {
      shardLock.acquire(lockTimeoutMS);
      success = true;
    } finally {
      if (success == false) {
        shardLock.decWaitCount();
      }
    }
  }
  logger.trace("successfully acquired shardlock for [{}]", id);
  return new ShardLock(id) { // new instance prevents double closing
    @Override
    protected void closeInternal() {
      shardLock.release();
      logger.trace("released shard lock for [{}]", id);
    }
  };
}
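// Usage sketch (illustrative only, not part of the production code above): acquiring a shard
// lock with try-with-resources so it is always released. This assumes ShardLock implements
// Closeable, as the close() contract in the Javadoc suggests; "nodeEnv" and
// "deleteShardContents" are hypothetical names standing in for the owning environment instance
// and for some write operation on the shard's data directory.
private void deleteShardDirectorySafely(ShardId shardId) throws IOException {
  try (ShardLock lock = nodeEnv.shardLock(shardId, 5000 /* lockTimeoutMS */)) {
    // any write to the shard's data directory must happen while the lock is held
    deleteShardContents(shardId);
  } // close() releases the lock, even if deleteShardContents throws
}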
@Test
public void testCleanUpWithLegacyChecksums() throws IOException {
  Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
  metaDataMap.put(
      "segments_1",
      new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[] {1})));
  metaDataMap.put(
      "_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
  Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(metaDataMap);

  final ShardId shardId = new ShardId(new Index("index"), 1);
  DirectoryService directoryService = new LuceneManagedDirectoryService(random());
  Store store =
      new Store(
          shardId,
          ImmutableSettings.EMPTY,
          directoryService,
          randomDistributor(directoryService),
          new DummyShardLock(shardId));
  for (String file : metaDataMap.keySet()) {
    try (IndexOutput output = store.directory().createOutput(file, IOContext.DEFAULT)) {
      BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
      output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
      CodecUtil.writeFooter(output);
    }
  }

  store.verifyAfterCleanup(snapshot, snapshot);
  store.deleteContent();
  IOUtils.close(store);
}
/**
 * Legacy indices without Lucene CRC32 checksums never wrote or calculated checksums for
 * segments_N files, but did for other files.
 */
@Test
public void testRecoveryDiffWithLegacyCommit() {
  Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
  metaDataMap.put(
      "segments_1",
      new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[] {1})));
  metaDataMap.put(
      "_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
  Store.MetadataSnapshot first = new Store.MetadataSnapshot(metaDataMap);
  Store.MetadataSnapshot second = new Store.MetadataSnapshot(metaDataMap);

  Store.RecoveryDiff recoveryDiff = first.recoveryDiff(second);
  assertEquals(recoveryDiff.toString(), recoveryDiff.different.size(), 2);
}
/** Returns all currently locked shards. */
public Set<ShardId> lockedShards() {
  synchronized (shardLocks) {
    ImmutableSet.Builder<ShardId> builder = ImmutableSet.builder();
    return builder.addAll(shardLocks.keySet()).build();
  }
}
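// Illustrative sketch: while a shard lock is held, lockedShards() reports the shard as locked.
// "nodeEnv" and "shardId" are the same hypothetical names used in the sketch above; the
// assertion style assumes a JUnit test context.
try (ShardLock lock = nodeEnv.shardLock(shardId, 5000)) {
  assertTrue(nodeEnv.lockedShards().contains(shardId));
}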
// IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE
// OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@Test
public void testWriteLegacyChecksums() throws IOException {
  final ShardId shardId = new ShardId(new Index("index"), 1);
  DirectoryService directoryService = new LuceneManagedDirectoryService(random());
  Store store =
      new Store(
          shardId,
          ImmutableSettings.EMPTY,
          directoryService,
          randomDistributor(directoryService),
          new DummyShardLock(shardId));
  // set default codec - all segments need checksums
  final boolean usesOldCodec = randomBoolean();
  IndexWriter writer =
      new IndexWriter(
          store.directory(),
          newIndexWriterConfig(random(), new MockAnalyzer(random()))
              .setCodec(usesOldCodec ? new OldSIMockingCodec() : actualDefaultCodec()));
  int docs = 1 + random().nextInt(100);

  for (int i = 0; i < docs; i++) {
    Document doc = new Document();
    doc.add(
        new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(
        new TextField(
            "body",
            TestUtil.randomRealisticUnicodeString(random()),
            random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(
        new SortedDocValuesField(
            "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
    writer.addDocument(doc);
  }
  if (random().nextBoolean()) {
    for (int i = 0; i < docs; i++) {
      if (random().nextBoolean()) {
        Document doc = new Document();
        doc.add(
            new TextField(
                "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(
            new TextField(
                "body",
                TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        writer.updateDocument(new Term("id", "" + i), doc);
      }
    }
  }
  if (random().nextBoolean()) {
    DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
  }
  Store.MetadataSnapshot metadata;
  // check before we committed
  try {
    store.getMetadata();
    fail("no index present - expected exception");
  } catch (IndexNotFoundException ex) {
    // expected
  }
  assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
  writer.close();

  Store.LegacyChecksums checksums = new Store.LegacyChecksums();
  Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
  for (String file : store.directory().listAll()) {
    if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
      continue;
    }
    BytesRef hash = new BytesRef();
    if (file.startsWith("segments")) {
      hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
    }
    StoreFileMetaData storeFileMetaData =
        new StoreFileMetaData(
            file, store.directory().fileLength(file), file + "checksum", null, hash);
    legacyMeta.put(file, storeFileMetaData);
    checksums.add(storeFileMetaData);
  }
  checksums.write(store);

  metadata = store.getMetadata();
  Map<String, StoreFileMetaData> stringStoreFileMetaDataMap = metadata.asMap();
  assertThat(legacyMeta.size(), equalTo(stringStoreFileMetaDataMap.size()));
  if (usesOldCodec) {
    for (StoreFileMetaData meta : legacyMeta.values()) {
      assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
      assertEquals(meta.name() + "checksum", meta.checksum());
      assertTrue(
          meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()),
          stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
    }
  } else {
    // even if we have a legacy checksum - if we use a new codec we should reuse
    for (StoreFileMetaData meta : legacyMeta.values()) {
      assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
      assertFalse(
          meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()),
          stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
      StoreFileMetaData storeFileMetaData = metadata.get(meta.name());
      try (IndexInput input =
          store.openVerifyingInput(meta.name(), IOContext.DEFAULT, storeFileMetaData)) {
        assertTrue(storeFileMetaData.toString(), input instanceof Store.VerifyingIndexInput);
        input.seek(meta.length());
        Store.verify(input);
      }
    }
  }
  assertDeleteContent(store, directoryService);
  IOUtils.close(store);
}
@Test
public void testCleanupFromSnapshot() throws IOException {
  final ShardId shardId = new ShardId(new Index("index"), 1);
  DirectoryService directoryService = new LuceneManagedDirectoryService(random());
  Store store =
      new Store(
          shardId,
          ImmutableSettings.EMPTY,
          directoryService,
          randomDistributor(directoryService),
          new DummyShardLock(shardId));
  // this time random codec....
  IndexWriterConfig indexWriterConfig =
      newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(actualDefaultCodec());
  // we keep all commits and that allows us to clean based on multiple snapshots
  indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
  IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
  int docs = 1 + random().nextInt(100);
  int numCommits = 0;
  for (int i = 0; i < docs; i++) {
    if (i > 0 && randomIntBetween(0, 10) == 0) {
      writer.commit();
      numCommits++;
    }
    Document doc = new Document();
    doc.add(
        new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(
        new TextField(
            "body",
            TestUtil.randomRealisticUnicodeString(random()),
            random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(
        new SortedDocValuesField(
            "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
    writer.addDocument(doc);
  }
  if (numCommits < 1) {
    writer.commit();
    Document doc = new Document();
    doc.add(
        new TextField(
            "id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(
        new TextField(
            "body",
            TestUtil.randomRealisticUnicodeString(random()),
            random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    doc.add(
        new SortedDocValuesField(
            "dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
    writer.addDocument(doc);
  }

  Store.MetadataSnapshot firstMeta = store.getMetadata();

  if (random().nextBoolean()) {
    for (int i = 0; i < docs; i++) {
      if (random().nextBoolean()) {
        Document doc = new Document();
        doc.add(
            new TextField(
                "id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        doc.add(
            new TextField(
                "body",
                TestUtil.randomRealisticUnicodeString(random()),
                random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
        writer.updateDocument(new Term("id", "" + i), doc);
      }
    }
  }
  writer.commit();
  writer.close();

  Store.MetadataSnapshot secondMeta = store.getMetadata();

  Store.LegacyChecksums checksums = new Store.LegacyChecksums();
  Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
  for (String file : store.directory().listAll()) {
    if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
      continue;
    }
    BytesRef hash = new BytesRef();
    if (file.startsWith("segments")) {
      hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
    }
    StoreFileMetaData storeFileMetaData =
        new StoreFileMetaData(
            file, store.directory().fileLength(file), file + "checksum", null, hash);
    legacyMeta.put(file, storeFileMetaData);
    checksums.add(storeFileMetaData);
  }
  // write one checksum file here - we expect it to survive all the cleanups
  checksums.write(store);

  if (randomBoolean()) {
    store.cleanupAndVerify("test", firstMeta);
    String[] strings = store.directory().listAll();
    int numChecksums = 0;
    int numNotFound = 0;
    for (String file : strings) {
      assertTrue(firstMeta.contains(file) || Store.isChecksum(file));
      if (Store.isChecksum(file)) {
        numChecksums++;
      } else if (secondMeta.contains(file) == false) {
        numNotFound++;
      }
    }
    assertTrue(
        "at least one file must not be in here since we have two commits?", numNotFound > 0);
    assertEquals(
        "we wrote one checksum but it's gone now? - checksums are supposed to be kept",
        numChecksums,
        1);
  } else {
    store.cleanupAndVerify("test", secondMeta);
    String[] strings = store.directory().listAll();
    int numChecksums = 0;
    int numNotFound = 0;
    for (String file : strings) {
      assertTrue(secondMeta.contains(file) || Store.isChecksum(file));
      if (Store.isChecksum(file)) {
        numChecksums++;
      } else if (firstMeta.contains(file) == false) {
        numNotFound++;
      }
    }
    assertTrue(
        "at least one file must not be in here since we have two commits?", numNotFound > 0);
    assertEquals(
        "we wrote one checksum but it's gone now? - checksums are supposed to be kept",
        numChecksums,
        1);
  }
  store.deleteContent();
  IOUtils.close(store);
}