@Test
public void shouldNotSnapshotPopulatingIndexes() throws Exception {
    // GIVEN
    CountDownLatch populatorLatch = new CountDownLatch(1);
    IndexAccessor indexAccessor = mock(IndexAccessor.class);
    IndexingService indexing = newIndexingServiceWithMockedDependencies(
            populator, indexAccessor, new DataUpdates(new NodePropertyUpdate[0]));
    int indexId = 1;
    int indexId2 = 2;
    File theFile = new File("Blah");
    IndexRule rule1 = indexRule(indexId, 2, 3, PROVIDER_DESCRIPTOR);
    IndexRule rule2 = indexRule(indexId2, 4, 5, PROVIDER_DESCRIPTOR);
    doAnswer(waitForLatch(populatorLatch)).when(populator).create();
    when(indexAccessor.snapshotFiles()).thenAnswer(newResourceIterator(theFile));
    when(indexProvider.getInitialState(indexId)).thenReturn(POPULATING);
    when(indexProvider.getInitialState(indexId2)).thenReturn(ONLINE);
    indexing.initIndexes(iterator(rule1, rule2));
    life.start();

    // WHEN
    ResourceIterator<File> files = indexing.snapshotStoreFiles();
    // only now, after the snapshot, is the population job allowed to finish
    populatorLatch.countDown();

    // THEN
    // We get a snapshot from the online index, but no snapshot from the populating one
    assertThat(asCollection(files), equalTo(asCollection(iterator(theFile))));
}
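// The waitForLatch helper used above is not shown in this excerpt. A minimal sketch,
// assuming Mockito's Answer API: the stubbed populator.create() blocks on the latch,
// which is what keeps the population job in flight while the snapshot is taken.
private static Answer<Void> waitForLatch(final CountDownLatch latch) {
    return new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws InterruptedException {
            latch.await(); // block the populator until the test counts the latch down
            return null;
        }
    };
}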
@Test
public void shouldSnapshotOnlineIndexes() throws Exception {
    // GIVEN
    IndexAccessor indexAccessor = mock(IndexAccessor.class);
    IndexingService indexing = newIndexingServiceWithMockedDependencies(
            mock(IndexPopulator.class), indexAccessor, new DataUpdates(new NodePropertyUpdate[0]));
    int indexId = 1;
    int indexId2 = 2;
    File theFile = new File("Blah");
    IndexRule rule1 = indexRule(indexId, 2, 3, PROVIDER_DESCRIPTOR);
    IndexRule rule2 = indexRule(indexId2, 4, 5, PROVIDER_DESCRIPTOR);
    when(indexAccessor.snapshotFiles()).thenAnswer(newResourceIterator(theFile));
    when(indexProvider.getInitialState(indexId)).thenReturn(ONLINE);
    when(indexProvider.getInitialState(indexId2)).thenReturn(ONLINE);
    indexing.initIndexes(iterator(rule1, rule2));
    life.start();

    // WHEN
    ResourceIterator<File> files = indexing.snapshotStoreFiles();

    // THEN
    // We get a snapshot per online index
    assertThat(asCollection(files), equalTo(asCollection(iterator(theFile, theFile))));
}
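// Likewise, newResourceIterator is referenced by both snapshot tests but not shown here.
// A minimal sketch, assuming Mockito's Answer API and Neo4j's IteratorUtil helpers: each
// invocation of snapshotFiles() must get a fresh iterator, otherwise the second online
// index would see an already-exhausted one.
private static Answer<ResourceIterator<File>> newResourceIterator(final File... files) {
    return new Answer<ResourceIterator<File>>() {
        @Override
        public ResourceIterator<File> answer(InvocationOnMock invocation) {
            return asResourceIterator(iterator(files)); // fresh iterator per call
        }
    };
}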
private void deleteIndexesContainingArrayValues(
        File storeDir, PageCache pageCache, SchemaIndexProvider schemaIndexProvider)
        throws IOException {
    File indexRoot = getRootDirectory(storeDir, schemaIndexProvider.getProviderDescriptor().getKey());
    IndexSamplingConfig samplingConfig = new IndexSamplingConfig(new Config());
    List<File> indexesToBeDeleted = new ArrayList<>();
    try (SchemaStore schema = schemaStoreProvider.provide(storeDir, pageCache)) {
        Iterator<SchemaRule> rules = schema.loadAllSchemaRules();
        while (rules.hasNext()) {
            SchemaRule rule = rules.next();
            IndexConfiguration indexConfig =
                    new IndexConfiguration(rule.getKind() == UNIQUENESS_CONSTRAINT);
            try (IndexAccessor accessor =
                    schemaIndexProvider.getOnlineAccessor(rule.getId(), indexConfig, samplingConfig)) {
                try (IndexReader reader = accessor.newReader()) {
                    if (reader.valueTypesInIndex().contains(Array.class)) {
                        indexesToBeDeleted.add(new File(indexRoot, "" + rule.getId()));
                    }
                }
            }
        }
    }
    for (File index : indexesToBeDeleted) {
        fileSystem.deleteRecursively(index);
    }
}
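// Note on the path arithmetic above: with the default on-disk layout, each index lives
// in a directory named after its rule id under the provider's root (for the Lucene
// provider, <storeDir>/schema/index/lucene/<ruleId>), so deleting
// new File(indexRoot, "" + rule.getId()) removes exactly the files of that one index.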
private IndexRule[] getIndexesNeedingPopulation() {
    List<IndexRule> indexesNeedingPopulation = new ArrayList<>();
    for (SchemaRule rule : schemaCache.schemaRules()) {
        if (rule.getKind().isIndex()) {
            IndexRule indexRule = (IndexRule) rule;
            SchemaIndexProvider provider =
                    schemaIndexProviders.apply(indexRule.getProviderDescriptor());
            if (provider.getInitialState(indexRule.getId()) != InternalIndexState.FAILED) {
                indexesNeedingPopulation.add(indexRule);
            }
        }
    }
    return indexesNeedingPopulation.toArray(new IndexRule[indexesNeedingPopulation.size()]);
}
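// Note: FAILED indexes are deliberately excluded above. An index whose initial state is
// FAILED presumably cannot be brought back by a plain repopulation pass, so it is left
// for explicit handling; everything else (ONLINE or POPULATING) is queued for rebuild.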
@Test
public void shouldLogIndexStateOnStart() throws Exception {
    // given
    TestLogger logger = new TestLogger();
    SchemaIndexProvider provider = mock(SchemaIndexProvider.class);
    when(provider.getProviderDescriptor()).thenReturn(PROVIDER_DESCRIPTOR);
    SchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(provider);
    TokenNameLookup mockLookup = mock(TokenNameLookup.class);
    IndexingService indexingService =
            new IndexingService(
                    mock(JobScheduler.class),
                    providerMap,
                    mock(IndexStoreView.class),
                    mockLookup,
                    mock(UpdateableSchemaState.class),
                    mockLogging(logger));
    IndexRule onlineIndex = indexRule(1, 1, 1, PROVIDER_DESCRIPTOR);
    IndexRule populatingIndex = indexRule(2, 1, 2, PROVIDER_DESCRIPTOR);
    IndexRule failedIndex = indexRule(3, 2, 2, PROVIDER_DESCRIPTOR);
    when(provider.getInitialState(onlineIndex.getId())).thenReturn(ONLINE);
    when(provider.getInitialState(populatingIndex.getId())).thenReturn(InternalIndexState.POPULATING);
    when(provider.getInitialState(failedIndex.getId())).thenReturn(InternalIndexState.FAILED);
    indexingService.initIndexes(asList(onlineIndex, populatingIndex, failedIndex).iterator());
    when(mockLookup.labelGetName(1)).thenReturn("LabelOne");
    when(mockLookup.labelGetName(2)).thenReturn("LabelTwo");
    when(mockLookup.propertyKeyGetName(1)).thenReturn("propertyOne");
    when(mockLookup.propertyKeyGetName(2)).thenReturn("propertyTwo");
    logger.clear();

    // when
    indexingService.start();

    // then
    verify(provider).getPopulationFailure(3);
    logger.assertAtLeastOnce(info("IndexingService.start: index on :LabelOne(propertyOne) is ONLINE"));
    logger.assertAtLeastOnce(info("IndexingService.start: index on :LabelOne(propertyTwo) is POPULATING"));
    logger.assertAtLeastOnce(info("IndexingService.start: index on :LabelTwo(propertyTwo) is FAILED"));
}
private IndexingService newIndexingServiceWithMockedDependencies(
        IndexPopulator populator, IndexAccessor accessor, DataUpdates data) throws IOException {
    StringLogger logger = mock(StringLogger.class);
    indexProvider = mock(SchemaIndexProvider.class);
    IndexStoreView storeView = mock(IndexStoreView.class);
    UpdateableSchemaState schemaState = mock(UpdateableSchemaState.class);
    when(indexProvider.getProviderDescriptor()).thenReturn(PROVIDER_DESCRIPTOR);
    when(indexProvider.getPopulator(anyLong(), any(IndexConfiguration.class))).thenReturn(populator);
    data.getsProcessedByStoreScanFrom(storeView);
    when(indexProvider.getOnlineAccessor(anyLong(), any(IndexConfiguration.class))).thenReturn(accessor);
    return life.add(
            new IndexingService(
                    life.add(new Neo4jJobScheduler(logger)),
                    new DefaultSchemaIndexProviderMap(indexProvider),
                    storeView,
                    mock(TokenNameLookup.class),
                    schemaState,
                    mockLogging(logger)));
}
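// mockLogging, used by the two constructions above, is also outside this excerpt. A
// minimal sketch, assuming the Logging interface hands out a StringLogger per class:
private static Logging mockLogging(StringLogger logger) {
    Logging logging = mock(Logging.class);
    when(logging.getMessagesLog(any(Class.class))).thenReturn(logger);
    return logging;
}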
// Startup sequence
// By doing this sequence of method calls we can ensure that no dependency cycles exist,
// and get a clearer view of the dependency tree, starting at the bottom
private void upgradeStore(
        File store, StoreUpgrader storeMigrationProcess, SchemaIndexProvider indexProvider) {
    storeMigrationProcess.addParticipant(indexProvider.storeMigrationParticipant());
    storeMigrationProcess.migrateIfNeeded(store.getParentFile(), indexProvider, pageCache);
}
@Override
public void start() throws IOException {
    life = new LifeSupport();
    readOnly = config.get(Configuration.read_only);
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    if (!storeFactory.storeExists()) {
        storeFactory.createNeoStore().close();
    }
    indexProvider =
            dependencyResolver.resolveDependency(
                    SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    storeMigrationProcess.addParticipant(indexProvider.storeMigrationParticipant());
    // TODO: Build a real provider map
    final DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(indexProvider);
    storeMigrationProcess.migrateIfNeeded(store.getParentFile());
    neoStore = dependencies.satisfyDependency(storeFactory.newNeoStore(false));
    dependencies.satisfyDependency(TransactionIdStore.class, neoStore);
    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());
    nodeCache = new AutoLoadingCache<>(cacheProvider.node(), nodeLoader(neoStore.getNodeStore()));
    relationshipCache =
            new AutoLoadingCache<>(
                    cacheProvider.relationship(), relationshipLoader(neoStore.getRelationshipStore()));
    RelationshipLoader relationshipLoader =
            new RelationshipLoader(relationshipCache, new RelationshipChainLoader(neoStore));
    PersistenceCache persistenceCache =
            new PersistenceCache(
                    nodeCache,
                    relationshipCache,
                    nodeManager,
                    relationshipLoader,
                    propertyKeyTokenHolder,
                    relationshipTypeTokens,
                    labelTokens);
    CacheAccessBackDoor cacheAccess =
            new BridgingCacheAccess(schemaCache, updateableSchemaState, persistenceCache);
    try {
        indexingService =
                new IndexingService(
                        scheduler,
                        providerMap,
                        new NeoStoreIndexStoreView(lockService, neoStore),
                        tokenNameLookup,
                        updateableSchemaState,
                        indexRuleLoader(),
                        logging,
                        indexingServiceMonitor); // TODO 2.2-future What index rules should be
        final IntegrityValidator integrityValidator = new IntegrityValidator(neoStore, indexingService);
        labelScanStore =
                dependencyResolver
                        .resolveDependency(
                                LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                        .getLabelScanStore();
        fileListing =
                new NeoStoreFileListing(storeDir, labelScanStore, indexingService, legacyIndexProviderLookup);
        Provider<NeoStore> neoStoreProvider =
                new Provider<NeoStore>() {
                    @Override
                    public NeoStore instance() {
                        return getNeoStore();
                    }
                };
        if (config.get(GraphDatabaseSettings.cache_type).equals(CacheLayer.EXPERIMENTAL_OFF)) {
            storeLayer =
                    new DiskLayer(
                            propertyKeyTokenHolder,
                            labelTokens,
                            relationshipTypeTokens,
                            new SchemaStorage(neoStore.getSchemaStore()),
                            neoStoreProvider,
                            indexingService);
        } else {
            storeLayer =
                    new CacheLayer(
                            new DiskLayer(
                                    propertyKeyTokenHolder,
                                    labelTokens,
                                    relationshipTypeTokens,
                                    new SchemaStorage(neoStore.getSchemaStore()),
                                    neoStoreProvider,
                                    indexingService),
                            persistenceCache,
                            indexingService,
                            schemaCache);
        }
        LegacyPropertyTrackers legacyPropertyTrackers =
                new LegacyPropertyTrackers(
                        propertyKeyTokenHolder,
                        nodeManager.getNodePropertyTrackers(),
                        nodeManager.getRelationshipPropertyTrackers(),
                        nodeManager);
        StatisticsService statisticsService =
                new StatisticsServiceRepository(fs, config, storeLayer, scheduler).loadStatistics();
        final NeoStoreTransactionContextSupplier neoStoreTransactionContextSupplier =
                new NeoStoreTransactionContextSupplier(neoStore);
        final TransactionHooks hooks = new TransactionHooks();
        File directory = config.get(GraphDatabaseSettings.store_dir);
        TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(1000, 100_000);
        PhysicalLogFiles logFiles = new PhysicalLogFiles(directory, PhysicalLogFile.DEFAULT_NAME, fs);
        LogFileInformation logFileInformation =
                dependencies.satisfyDependency(
                        LogFileInformation.class,
                        new PhysicalLogFileInformation(
                                logFiles,
                                transactionMetadataCache,
                                neoStore,
                                new PhysicalLogFileInformation.SPI() {
                                    @Override
                                    public long getTimestampForVersion(long version) throws IOException {
                                        LogPosition position = new LogPosition(version, LOG_HEADER_SIZE);
                                        try (ReadableLogChannel channel = logFile.getReader(position)) {
                                            LogEntryReader<ReadableLogChannel> reader =
                                                    new VersionAwareLogEntryReader();
                                            LogEntry entry;
                                            while ((entry = reader.readLogEntry(channel)) != null) {
                                                if (entry instanceof LogEntryStart) {
                                                    return ((LogEntryStart) entry).getTimeWritten();
                                                }
                                            }
                                        }
                                        return -1;
                                    }
                                }));
        LogPruneStrategy logPruneStrategy =
                LogPruneStrategyFactory.fromConfigValue(
                        fs,
                        logFileInformation,
                        logFiles,
                        neoStore,
                        config.get(GraphDatabaseSettings.keep_logical_logs));
        final TransactionRepresentationStoreApplier storeApplier =
                dependencies.satisfyDependency(
                        new TransactionRepresentationStoreApplier(
                                indexingService,
                                labelScanStore,
                                neoStore,
                                cacheAccess,
                                lockService,
                                legacyIndexProviderLookup,
                                indexConfigStore,
                                DEFAULT_HIGH_ID_TRACKING));
        LoggingLogFileMonitor logMonitor = new LoggingLogFileMonitor(logging.getMessagesLog(getClass()));
        RecoveryVisitor recoveryVisitor =
                new RecoveryVisitor(neoStore, storeApplier, recoveredCount, logMonitor);
        Visitor<ReadableLogChannel, IOException> logFileRecoverer =
                new LogFileRecoverer(new VersionAwareLogEntryReader(), recoveryVisitor);
        logFile =
                dependencies.satisfyDependency(
                        new PhysicalLogFile(
                                fs,
                                logFiles,
                                config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
                                logPruneStrategy,
                                neoStore,
                                neoStore,
                                logMonitor,
                                this,
                                transactionMetadataCache,
                                logFileRecoverer));
        final LogicalTransactionStore logicalTransactionStore =
                dependencies.satisfyDependency(
                        LogicalTransactionStore.class,
                        new PhysicalLogicalTransactionStore(
                                logFile,
                                txIdGenerator,
                                transactionMetadataCache,
                                neoStore,
                                config.get(GraphDatabaseSettings.batched_writes)));
        TransactionCommitProcess transactionCommitProcess =
                dependencies.satisfyDependency(
                        TransactionCommitProcess.class,
                        commitProcessFactory.create(
                                logicalTransactionStore,
                                kernelHealth,
                                neoStore,
                                storeApplier,
                                new NeoStoreInjectedTransactionValidator(integrityValidator),
                                false));
        /*
         * This is used by legacy indexes and constraint indexes whenever a transaction is to be
         * spawned from within an existing transaction. It smells, and we should look over
         * alternatives when time permits.
         */
        Provider<KernelAPI> kernelProvider =
                new Provider<KernelAPI>() {
                    @Override
                    public KernelAPI instance() {
                        return kernel;
                    }
                };
        ConstraintIndexCreator constraintIndexCreator =
                new ConstraintIndexCreator(kernelProvider, indexingService);
        LegacyIndexStore legacyIndexStore =
                new LegacyIndexStore(config, indexConfigStore, kernelProvider, legacyIndexProviderLookup);
        StatementOperationParts statementOperations =
                buildStatementOperations(
                        storeLayer,
                        legacyPropertyTrackers,
                        constraintIndexCreator,
                        updateableSchemaState,
                        guard,
                        legacyIndexStore);
        kernelTransactions =
                life.add(
                        new KernelTransactions(
                                neoStoreTransactionContextSupplier,
                                neoStore,
                                locks,
                                integrityValidator,
                                constraintIndexCreator,
                                indexingService,
                                labelScanStore,
                                statementOperations,
                                updateableSchemaState,
                                schemaWriteGuard,
                                providerMap,
                                transactionHeaderInformationFactory,
                                persistenceCache,
                                storeLayer,
                                transactionCommitProcess,
                                indexConfigStore,
                                legacyIndexProviderLookup,
                                hooks,
                                transactionMonitor,
                                life,
                                readOnly));
        kernel = new Kernel(statisticsService, kernelTransactions, hooks, kernelHealth, transactionMonitor);
        life.add(logFile);
        life.add(logicalTransactionStore);
        life.add(
                new LifecycleAdapter() {
                    @Override
                    public void start() throws Throwable {
                        startupStatistics.setNumberOfRecoveredTransactions(recoveredCount.get());
                        recoveredCount.set(0);
                        loadSchemaCache();
                    }
                });
        life.add(statisticsService);
        life.add(
                new LifecycleAdapter() {
                    @Override
                    public void start() {
                        neoStore.makeStoreOk();
                    }
                });
        life.add(indexingService);
        life.add(labelScanStore);
        kernel.registerTransactionHook(transactionEventHandlers);
        neoStore.setRecoveredStatus(true);
        try {
            life.start();
        } finally {
            neoStore.setRecoveredStatus(false);
        }
        propertyKeyTokenHolder.addTokens(neoStore.getPropertyKeyTokenStore().getTokens(Integer.MAX_VALUE));
        relationshipTypeTokens.addTokens(
                neoStore.getRelationshipTypeTokenStore().getTokens(Integer.MAX_VALUE));
        labelTokens.addTokens(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    } catch (Throwable e) {
        // Something unexpected happened during startup
        try {
            // Close the neostore, so that locks are released properly
            neoStore.close();
        } catch (Exception closeException) {
            msgLog.logMessage("Couldn't close neostore after startup failure");
        }
        throw Exceptions.launderedException(e);
    }
}