public URI switchToSlave(LifeSupport haCommunicationLife, URI me, URI masterUri) throws Throwable {
    console.log("ServerId " + config.get(ClusterSettings.server_id)
            + ", moving to slave for master " + masterUri);

    assert masterUri != null; // since we are here it must already have been set from outside

    idGeneratorFactory.switchToSlave();

    // If there is no store present locally, fetch a full copy from the master before joining as a slave
    if (!isStorePresent(resolver.resolveDependency(FileSystemAbstraction.class), config)) {
        copyStoreFromMaster(masterUri);
    }

    NeoStoreXaDataSource nioneoDataSource = resolver.resolveDependency(NeoStoreXaDataSource.class);
    nioneoDataSource.afterModeSwitch();

    // Verify that the local store is consistent with, and caught up with, the master
    checkDataConsistency(resolver.resolveDependency(RequestContextFactory.class), nioneoDataSource, masterUri);

    URI slaveUri = startHaCommunication(haCommunicationLife, nioneoDataSource, me, masterUri);

    console.log("ServerId " + config.get(ClusterSettings.server_id)
            + ", successfully moved to slave for master " + masterUri);

    return slaveUri;
}
private URI startHaCommunication(LifeSupport haCommunicationLife, NeoStoreXaDataSource nioneoDataSource,
        URI me, URI masterUri) {
    MasterClient master = newMasterClient(masterUri, nioneoDataSource.getStoreId(), haCommunicationLife);

    Slave slaveImpl = new SlaveImpl(nioneoDataSource.getStoreId(),
            resolver.resolveDependency(UpdatePuller.class));

    SlaveServer server = new SlaveServer(slaveImpl, serverConfig(), logging,
            resolver.resolveDependency(Monitors.class));

    masterDelegateHandler.setDelegate(master);

    haCommunicationLife.add(slaveImpl);
    haCommunicationLife.add(server);
    haCommunicationLife.start();

    URI slaveHaURI = createHaURI(me, server);
    clusterMemberAvailability.memberIsAvailable(HighAvailabilityModeSwitcher.SLAVE, slaveHaURI);

    return slaveHaURI;
}
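Both methods above hand their components to a LifeSupport instance rather than starting them directly: everything registered with add() is started together by start() and torn down again by stop()/shutdown(). Below is a minimal, self-contained sketch of that pattern, assuming the org.neo4j.kernel.lifecycle package of this source tree; the anonymous logging component is illustrative only and stands in for SlaveImpl / SlaveServer.

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class LifeSupportSketch {
    public static void main(String[] args) {
        LifeSupport life = new LifeSupport();

        // Illustrative stand-in for the components registered above
        life.add(new LifecycleAdapter() {
            @Override
            public void start() {
                System.out.println("slave communication started");
            }

            @Override
            public void stop() {
                System.out.println("slave communication stopped");
            }
        });

        life.start();    // starts every registered component, in registration order
        // ... serve slave requests while the components are live ...
        life.shutdown(); // stops and shuts the components down again, in reverse order
    }
}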
@Before
public void setUp() throws Exception {
    db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().newImpermanentDatabase();
    DependencyResolver dependencyResolver = db.getDependencyResolver();
    sampleable = new NodeIdsInUseSampleable(
            dependencyResolver.resolveDependency(NeoStoreProvider.class).evaluate());
}
@Before
public void before() {
    GraphDatabaseAPI graphDatabaseAPI = dbRule.getGraphDatabaseAPI();
    this.db = graphDatabaseAPI;
    DependencyResolver dependencyResolver = graphDatabaseAPI.getDependencyResolver();
    this.bridge = dependencyResolver.resolveDependency(ThreadToStatementContextBridge.class);
    dependencyResolver.resolveDependency(Monitors.class).addMonitorListener(indexOnlineMonitor);
}
private void startServicesAgain() throws Throwable {
    List<Class> services = new ArrayList<>(Arrays.asList(SERVICES_TO_RESTART_FOR_STORE_COPY));
    for (Class<?> serviceClass : services) {
        Lifecycle service = (Lifecycle) resolver.resolveDependency(serviceClass);
        service.start();
    }
}
private void copyStoreFromMaster(URI masterUri) throws Throwable {
    FileSystemAbstraction fs = resolver.resolveDependency(FileSystemAbstraction.class);

    // Must be called under lock on XaDataSourceManager
    LifeSupport life = new LifeSupport();
    try {
        // Remove the current store - neostore file is missing, nothing we can really do
        // stopServicesAndHandleBranchedStore( BranchedDataPolicy.keep_none );
        final MasterClient copyMaster = newMasterClient(masterUri, null, life);
        life.start();

        // This will move the copied db to the graphdb location
        console.log("Copying store from master");
        new StoreCopyClient(config, kernelExtensions, console, fs).copyStore(
                new StoreCopyClient.StoreCopyRequester() {
                    @Override
                    public Response<?> copyStore(StoreWriter writer) {
                        return copyMaster.copyStore(
                                new RequestContext(0, config.get(ClusterSettings.server_id).toIntegerIndex(),
                                        0, 0, 0, 0),
                                writer);
                    }

                    @Override
                    public void done() {
                        // Nothing to clean up here
                    }
                });

        startServicesAgain();
        console.log("Finished copying store from master");
    } finally {
        life.stop();
    }
}
private IndexingModule buildIndexing(Config config, JobScheduler scheduler, SchemaIndexProvider indexProvider,
        LockService lockService, TokenNameLookup tokenNameLookup, Logging logging,
        IndexingService.Monitor indexingServiceMonitor, NeoStore neoStore,
        UpdateableSchemaState updateableSchemaState) {
    final DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(indexProvider);

    final IndexingService indexingService = IndexingService.create(
            new IndexSamplingConfig(config),
            scheduler,
            providerMap,
            new NeoStoreIndexStoreView(lockService, neoStore),
            tokenNameLookup,
            updateableSchemaState,
            toList(new SchemaStorage(neoStore.getSchemaStore()).allIndexRules()),
            logging,
            indexingServiceMonitor);

    final IntegrityValidator integrityValidator = new IntegrityValidator(neoStore, indexingService);

    // TODO Move to constructor
    final LabelScanStore labelScanStore = dependencyResolver
            .resolveDependency(LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
            .getLabelScanStore();

    life.add(indexingService);
    life.add(labelScanStore);

    return new IndexingModule() {
        @Override
        public IndexingService indexingService() {
            return indexingService;
        }

        @Override
        public LabelScanStore labelScanStore() {
            return labelScanStore;
        }

        @Override
        public IntegrityValidator integrityValidator() {
            return integrityValidator;
        }

        @Override
        public SchemaIndexProviderMap schemaIndexProviderMap() {
            return providerMap;
        }
    };
}
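buildIndexing() follows the build*-module convention used throughout this data source: construct the collaborators as final locals, hand the lifecycle-managed ones to life, and return them behind a small module interface so later build steps can consume them without field access. A stripped-down sketch of that shape follows; FooModule and ModuleSketch are entirely made up for illustration and do not exist in the codebase.

// Hypothetical illustration of the build*-module pattern; not part of the codebase.
public class ModuleSketch {

    interface FooModule {
        String fooService();
    }

    // Builds the component as a final local and exposes it through a small module interface,
    // mirroring how buildIndexing() above exposes indexingService, labelScanStore, etc.
    FooModule buildFoo(String config) {
        final String fooService = "foo-" + config; // stands in for a real, lifecycle-managed component
        return new FooModule() {
            @Override
            public String fooService() {
                return fooService;
            }
        };
    }

    public static void main(String[] args) {
        System.out.println(new ModuleSketch().buildFoo("default").fooService());
    }
}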
private void stopServicesAndHandleBranchedStore(BranchedDataPolicy branchPolicy) throws Throwable {
    List<Class> services = new ArrayList<>(Arrays.asList(SERVICES_TO_RESTART_FOR_STORE_COPY));
    Collections.reverse(services);
    for (Class<Lifecycle> serviceClass : services) {
        resolver.resolveDependency(serviceClass).stop();
    }
    handleBranchedStore(branchPolicy);
}
private MasterClient newMasterClient(URI masterUri, StoreId storeId, LifeSupport life) {
    MasterClient masterClient = masterClientResolver.instantiate(
            masterUri.getHost(), masterUri.getPort(),
            resolver.resolveDependency(Monitors.class), storeId, life);
    if (!(masterClient instanceof MasterClient210)) {
        // Masters speaking a protocol older than 2.1.0 need extra handling of the id generator factory
        idGeneratorFactory.doTheThing();
    }
    return masterClient;
}
DiagnosticsImpl(ManagementData management) throws NotCompliantMBeanException {
    super(management);
    DependencyResolver resolver = management.getKernelData().graphDatabase().getDependencyResolver();
    this.diagnostics = resolver.resolveDependency(DiagnosticsManager.class);
}
public GraphDbStructureGuide(GraphDatabaseService graph) {
    this.db = (GraphDatabaseAPI) graph;
    DependencyResolver dependencyResolver = db.getDependencyResolver();
    this.bridge = dependencyResolver.resolveDependency(ThreadToStatementContextBridge.class);
    this.glops = GlobalGraphOperations.at(db);
}
@Override
public void start() throws Throwable {
    this.appender = resolver.resolveDependency(LogicalTransactionStore.class).getAppender();
    this.storeApplier = resolver.resolveDependency(TransactionRepresentationStoreApplier.class);
    this.transactionIdStore = resolver.resolveDependency(TransactionIdStore.class);
}
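Like the start() method above, most snippets in this collection pull their collaborators out of a DependencyResolver at startup instead of taking them as constructor arguments. The sketch below shows that lookup in isolation; DependencyLookupSketch is made up, and only the org.neo4j.graphdb / org.neo4j.kernel import paths of this era are assumed.

import org.neo4j.graphdb.DependencyResolver;
import org.neo4j.kernel.GraphDatabaseAPI;

// Made-up helper: looks up an already-registered component from the database's
// dependency graph, the same way the start() method above resolves its collaborators.
public class DependencyLookupSketch {
    public static <T> T lookup(GraphDatabaseAPI db, Class<T> componentClass) {
        DependencyResolver resolver = db.getDependencyResolver();
        return resolver.resolveDependency(componentClass);
    }
}

A caller would then do something like Monitors monitors = DependencyLookupSketch.lookup(db, Monitors.class), matching the lookups in the test fixtures above.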
@Override
public void start() throws IOException {
    life = new LifeSupport();
    readOnly = config.get(Configuration.read_only);
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    storeFactory.ensureStoreExists();

    final TransactionFactory tf;
    if (providers.shouldInterceptCommitting()) {
        tf = new InterceptingTransactionFactory();
    } else {
        tf = new TransactionFactory();
    }
    neoStore = storeFactory.newNeoStore(store);

    neoStoreTransactionContextSupplier = new NeoStoreTransactionContextSupplier(neoStore);

    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    final NodeManager nodeManager = dependencyResolver.resolveDependency(NodeManager.class);
    Iterator<? extends Cache<?>> caches = nodeManager.caches().iterator();
    persistenceCache = new PersistenceCache(
            (AutoLoadingCache<NodeImpl>) caches.next(),
            (AutoLoadingCache<RelationshipImpl>) caches.next(),
            new Thunk<GraphPropertiesImpl>() {
                @Override
                public GraphPropertiesImpl evaluate() {
                    return nodeManager.getGraphProperties();
                }
            },
            nodeManager);
    cacheAccess = new BridgingCacheAccess(nodeManager, schemaCache, updateableSchemaState, persistenceCache);

    try {
        indexProvider = dependencyResolver.resolveDependency(
                SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);

        // TODO: Build a real provider map
        DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(indexProvider);

        indexingService = life.add(new IndexingService(
                scheduler,
                providerMap,
                new NeoStoreIndexStoreView(locks, neoStore),
                tokenNameLookup,
                updateableSchemaState,
                logging));

        integrityValidator = new IntegrityValidator(neoStore, indexingService);

        xaContainer = xaFactory.newXaContainer(
                this,
                config.get(Configuration.logical_log),
                new CommandFactory(neoStore, indexingService),
                new NeoStoreInjectedTransactionValidator(integrityValidator),
                tf,
                stateFactory,
                providers,
                readOnly);

        labelScanStore = life.add(dependencyResolver
                .resolveDependency(LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                .getLabelScanStore());

        fileListing = new NeoStoreFileListing(xaContainer, storeDir, labelScanStore, indexingService);

        kernel = life.add(new Kernel(
                txManager,
                propertyKeyTokens,
                labelTokens,
                relationshipTypeTokens,
                persistenceManager,
                lockManager,
                updateableSchemaState,
                schemaWriteGuard,
                indexingService,
                nodeManager,
                neoStore,
                persistenceCache,
                schemaCache,
                providerMap,
                labelScanStore,
                readOnly));

        life.init();

        // TODO: Why isn't this done in the init() method of the indexing service?
        if (!readOnly) {
            neoStore.setRecoveredStatus(true);
            try {
                indexingService.initIndexes(loadIndexRules());
                xaContainer.openLogicalLog();
            } finally {
                neoStore.setRecoveredStatus(false);
            }
        }

        if (!xaContainer.getResourceManager().hasRecoveredTransactions()) {
            neoStore.makeStoreOk();
        } else {
            msgLog.debug("Waiting for TM to take care of recovered transactions.");
        }

        idGenerators = new ArrayMap<>((byte) 5, false, false);
        this.idGenerators.put(Node.class, neoStore.getNodeStore());
        this.idGenerators.put(Relationship.class, neoStore.getRelationshipStore());
        this.idGenerators.put(RelationshipType.class, neoStore.getRelationshipTypeStore());
        this.idGenerators.put(Label.class, neoStore.getLabelTokenStore());
        this.idGenerators.put(PropertyStore.class, neoStore.getPropertyStore());
        this.idGenerators.put(PropertyKeyTokenRecord.class,
                neoStore.getPropertyStore().getPropertyKeyTokenStore());

        setLogicalLogAtCreationTime(xaContainer.getLogicalLog());

        life.start();
    } catch (Throwable e) {
        // Something unexpected happened during startup
        try {
            // Close the neostore, so that locks are released properly
            neoStore.close();
        } catch (Exception closeException) {
            msgLog.logMessage("Couldn't close neostore after startup failure");
        }
        throw Exceptions.launderedException(e);
    }
}
private void checkDataConsistency(RequestContextFactory requestContextFactory,
        NeoStoreXaDataSource nioneoDataSource, URI masterUri) throws Throwable {
    // Must be called under lock on XaDataSourceManager
    LifeSupport checkConsistencyLife = new LifeSupport();
    TransactionIdStore txIdStore = null;
    try {
        MasterClient checkConsistencyMaster =
                newMasterClient(masterUri, nioneoDataSource.getStoreId(), checkConsistencyLife);
        checkConsistencyLife.start();

        console.log("Checking store consistency with master");
        txIdStore = nioneoDataSource.getDependencyResolver().resolveDependency(TransactionIdStore.class);
        checkDataConsistencyWithMaster(masterUri, checkConsistencyMaster, nioneoDataSource, txIdStore);
        console.log("Store is consistent");

        /*
         * Pull updates, since the store seems happy and everything. No matter how far back we are, this is just
         * one thread doing the pulling, while the guard is up. This will prevent a race between all transactions
         * that may start the moment the database becomes available, where all of them will pull the same txs from
         * the master but eventually only one will get to apply them.
         */
        console.log("Catching up with master");
        resolver.resolveDependency(TransactionCommittingResponseUnpacker.class)
                .unpackResponse(checkConsistencyMaster.pullUpdates(requestContextFactory.newRequestContext()));
        console.log("Now consistent with master");
    } catch (NoSuchLogVersionException e) {
        msgLog.logMessage("Cannot catch up to master by pulling updates, because I cannot find the archived "
                + "logical log file that has the transaction I would start from. I'm going to copy the whole "
                + "store from the master instead.");
        try {
            stopServicesAndHandleBranchedStore(config.get(HaSettings.branched_data_policy));
        } catch (Throwable throwable) {
            msgLog.warn("Failed preparing for copying the store from the master instance", throwable);
        }
        throw e;
    } catch (StoreUnableToParticipateInClusterException upe) {
        console.log("The store is inconsistent. Will treat it as branched and fetch a new one from the master");
        msgLog.warn("Current store is unable to participate in the cluster; fetching new store from master", upe);
        try {
            stopServicesAndHandleBranchedStore(config.get(HaSettings.branched_data_policy));
        } catch (IOException e) {
            msgLog.warn("Failed while trying to handle branched data", e);
        }
        throw upe;
    } catch (MismatchingStoreIdException e) {
        console.log("The store does not represent the same database as master. "
                + "Will remove and fetch a new one from master");
        if (txIdStore.getLastCommittingTransactionId() == 0) {
            msgLog.warn("Found and deleting empty store with mismatching store id " + e.getMessage());
            stopServicesAndHandleBranchedStore(BranchedDataPolicy.keep_none);
        } else {
            msgLog.error("Store cannot participate in cluster due to mismatching store IDs");
        }
        throw e;
    } finally {
        checkConsistencyLife.shutdown();
    }
}
@Override
public void start() throws IOException {
    dependencies = new Dependencies();
    life = new LifeSupport();
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    if (!storeFactory.storeExists()) {
        storeFactory.createNeoStore().close();
    }
    indexProvider = dependencyResolver.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);

    // Monitor listeners
    LoggingLogFileMonitor loggingLogMonitor = new LoggingLogFileMonitor(logging.getMessagesLog(getClass()));
    monitors.addMonitorListener(loggingLogMonitor);
    monitors.addMonitorListener(new RecoveryVisitor.Monitor() {
        @Override
        public void transactionRecovered(long txId) {
            recoveredCount.incrementAndGet();
        }
    });

    // Upgrade the store before we begin
    upgradeStore(store, storeMigrationProcess, indexProvider);

    // Build all modules and their services
    try {
        final NeoStoreModule neoStoreModule =
                buildNeoStore(storeFactory, labelTokens, relationshipTypeTokens, propertyKeyTokenHolder);
        this.neoStoreModule = neoStoreModule;

        // TODO The only reason this is here is because of the provider-stuff for DiskLayer.
        // Remove when possible
        CacheModule cacheModule = buildCaches(
                neoStoreModule.neoStore(),
                cacheProvider,
                nodeManager,
                labelTokens,
                relationshipTypeTokens,
                propertyKeyTokenHolder);

        IndexingModule indexingModule = buildIndexing(
                config,
                scheduler,
                indexProvider,
                lockService,
                tokenNameLookup,
                logging,
                indexingServiceMonitor,
                neoStoreModule.neoStore(),
                cacheModule.updateableSchemaState());

        StoreLayerModule storeLayerModule = buildStoreLayer(
                config,
                neoStoreModule.neoStore(),
                cacheModule.persistenceCache(),
                propertyKeyTokenHolder,
                labelTokens,
                relationshipTypeTokens,
                indexingModule.indexingService(),
                cacheModule.schemaCache());

        TransactionLogModule transactionLogModule = buildTransactionLogs(
                config,
                logging,
                indexingModule.labelScanStore(),
                fs,
                neoStoreModule.neoStore(),
                cacheModule.cacheAccess(),
                indexingModule.indexingService(),
                indexProviders.values());

        buildRecovery(
                fs,
                cacheModule.cacheAccess(),
                indexingModule.indexingService(),
                indexingModule.labelScanStore(),
                neoStoreModule.neoStore(),
                monitors.newMonitor(RecoveryVisitor.Monitor.class),
                monitors.newMonitor(Recovery.Monitor.class),
                transactionLogModule.logFiles(),
                transactionLogModule.logRotationControl(),
                startupStatistics);

        KernelModule kernelModule = buildKernel(
                indexingModule.integrityValidator(),
                transactionLogModule.logicalTransactionStore(),
                neoStoreModule.neoStore(),
                transactionLogModule.storeApplier(),
                indexingModule.indexingService(),
                storeLayerModule.storeLayer(),
                cacheModule.updateableSchemaState(),
                indexingModule.labelScanStore(),
                cacheModule.persistenceCache(),
                indexingModule.schemaIndexProviderMap());

        // Do these assignments last so that we can ensure no cyclical dependencies exist
        this.cacheModule = cacheModule;
        this.indexingModule = indexingModule;
        this.storeLayerModule = storeLayerModule;
        this.transactionLogModule = transactionLogModule;
        this.kernelModule = kernelModule;

        dependencies.satisfyDependency(this);
        satisfyDependencies(neoStoreModule, cacheModule, indexingModule, storeLayerModule,
                transactionLogModule, kernelModule);
    } catch (Throwable e) {
        // Something unexpected happened during startup
        try {
            // Close the neostore, so that locks are released properly
            neoStoreModule.neoStore().close();
        } catch (Exception closeException) {
            msgLog.logMessage("Couldn't close neostore after startup failure");
        }
        throw Exceptions.launderedException(e);
    }

    try {
        life.start();
    } catch (Throwable e) {
        // Something unexpected happened during startup
        try {
            // Close the neostore, so that locks are released properly
            neoStoreModule.neoStore().close();
        } catch (Exception closeException) {
            msgLog.logMessage("Couldn't close neostore after startup failure");
        }
        throw Exceptions.launderedException(e);
    }
}
private StoreId resolveStoreId() {
    return dependencyResolver.resolveDependency(StoreId.class);
}
@Override
public void start() throws IOException {
    life = new LifeSupport();
    readOnly = config.get(Configuration.read_only);
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    if (!storeFactory.storeExists()) {
        storeFactory.createNeoStore().close();
    }

    indexProvider = dependencyResolver.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    storeMigrationProcess.addParticipant(indexProvider.storeMigrationParticipant());

    // TODO: Build a real provider map
    final DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(indexProvider);

    storeMigrationProcess.migrateIfNeeded(store.getParentFile());

    neoStore = dependencies.satisfyDependency(storeFactory.newNeoStore(false));
    dependencies.satisfyDependency(TransactionIdStore.class, neoStore);

    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    nodeCache = new AutoLoadingCache<>(cacheProvider.node(), nodeLoader(neoStore.getNodeStore()));
    relationshipCache = new AutoLoadingCache<>(
            cacheProvider.relationship(), relationshipLoader(neoStore.getRelationshipStore()));
    RelationshipLoader relationshipLoader =
            new RelationshipLoader(relationshipCache, new RelationshipChainLoader(neoStore));
    PersistenceCache persistenceCache = new PersistenceCache(
            nodeCache,
            relationshipCache,
            nodeManager,
            relationshipLoader,
            propertyKeyTokenHolder,
            relationshipTypeTokens,
            labelTokens);
    CacheAccessBackDoor cacheAccess =
            new BridgingCacheAccess(schemaCache, updateableSchemaState, persistenceCache);

    try {
        indexingService = new IndexingService(
                scheduler,
                providerMap,
                new NeoStoreIndexStoreView(lockService, neoStore),
                tokenNameLookup,
                updateableSchemaState,
                indexRuleLoader(),
                logging,
                indexingServiceMonitor); // TODO 2.2-future What index rules should be

        final IntegrityValidator integrityValidator = new IntegrityValidator(neoStore, indexingService);

        labelScanStore = dependencyResolver
                .resolveDependency(LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                .getLabelScanStore();

        fileListing = new NeoStoreFileListing(
                storeDir, labelScanStore, indexingService, legacyIndexProviderLookup);

        Provider<NeoStore> neoStoreProvider = new Provider<NeoStore>() {
            @Override
            public NeoStore instance() {
                return getNeoStore();
            }
        };

        if (config.get(GraphDatabaseSettings.cache_type).equals(CacheLayer.EXPERIMENTAL_OFF)) {
            storeLayer = new DiskLayer(
                    propertyKeyTokenHolder,
                    labelTokens,
                    relationshipTypeTokens,
                    new SchemaStorage(neoStore.getSchemaStore()),
                    neoStoreProvider,
                    indexingService);
        } else {
            storeLayer = new CacheLayer(
                    new DiskLayer(
                            propertyKeyTokenHolder,
                            labelTokens,
                            relationshipTypeTokens,
                            new SchemaStorage(neoStore.getSchemaStore()),
                            neoStoreProvider,
                            indexingService),
                    persistenceCache,
                    indexingService,
                    schemaCache);
        }

        LegacyPropertyTrackers legacyPropertyTrackers = new LegacyPropertyTrackers(
                propertyKeyTokenHolder,
                nodeManager.getNodePropertyTrackers(),
                nodeManager.getRelationshipPropertyTrackers(),
                nodeManager);
        StatisticsService statisticsService =
                new StatisticsServiceRepository(fs, config, storeLayer, scheduler).loadStatistics();
        final NeoStoreTransactionContextSupplier neoStoreTransactionContextSupplier =
                new NeoStoreTransactionContextSupplier(neoStore);

        final TransactionHooks hooks = new TransactionHooks();

        File directory = config.get(GraphDatabaseSettings.store_dir);
        TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(1000, 100_000);
        PhysicalLogFiles logFiles = new PhysicalLogFiles(directory, PhysicalLogFile.DEFAULT_NAME, fs);

        LogFileInformation logFileInformation = dependencies.satisfyDependency(
                LogFileInformation.class,
                new PhysicalLogFileInformation(
                        logFiles,
                        transactionMetadataCache,
                        neoStore,
                        new PhysicalLogFileInformation.SPI() {
                            @Override
                            public long getTimestampForVersion(long version) throws IOException {
                                LogPosition position = new LogPosition(version, LOG_HEADER_SIZE);
                                try (ReadableLogChannel channel = logFile.getReader(position)) {
                                    LogEntryReader<ReadableLogChannel> reader = new VersionAwareLogEntryReader();
                                    LogEntry entry;
                                    while ((entry = reader.readLogEntry(channel)) != null) {
                                        if (entry instanceof LogEntryStart) {
                                            return ((LogEntryStart) entry).getTimeWritten();
                                        }
                                    }
                                }
                                return -1;
                            }
                        }));

        LogPruneStrategy logPruneStrategy = LogPruneStrategyFactory.fromConfigValue(
                fs, logFileInformation, logFiles, neoStore, config.get(GraphDatabaseSettings.keep_logical_logs));

        final TransactionRepresentationStoreApplier storeApplier = dependencies.satisfyDependency(
                new TransactionRepresentationStoreApplier(
                        indexingService,
                        labelScanStore,
                        neoStore,
                        cacheAccess,
                        lockService,
                        legacyIndexProviderLookup,
                        indexConfigStore,
                        DEFAULT_HIGH_ID_TRACKING));

        LoggingLogFileMonitor logMonitor = new LoggingLogFileMonitor(logging.getMessagesLog(getClass()));
        RecoveryVisitor recoveryVisitor = new RecoveryVisitor(neoStore, storeApplier, recoveredCount, logMonitor);
        Visitor<ReadableLogChannel, IOException> logFileRecoverer =
                new LogFileRecoverer(new VersionAwareLogEntryReader(), recoveryVisitor);

        logFile = dependencies.satisfyDependency(new PhysicalLogFile(
                fs,
                logFiles,
                config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
                logPruneStrategy,
                neoStore,
                neoStore,
                logMonitor,
                this,
                transactionMetadataCache,
                logFileRecoverer));

        final LogicalTransactionStore logicalTransactionStore = dependencies.satisfyDependency(
                LogicalTransactionStore.class,
                new PhysicalLogicalTransactionStore(
                        logFile,
                        txIdGenerator,
                        transactionMetadataCache,
                        neoStore,
                        config.get(GraphDatabaseSettings.batched_writes)));

        TransactionCommitProcess transactionCommitProcess = dependencies.satisfyDependency(
                TransactionCommitProcess.class,
                commitProcessFactory.create(
                        logicalTransactionStore,
                        kernelHealth,
                        neoStore,
                        storeApplier,
                        new NeoStoreInjectedTransactionValidator(integrityValidator),
                        false));

        /*
         * This is used by legacy indexes and constraint indexes whenever a transaction is to be spawned
         * from within an existing transaction. It smells, and we should look over alternatives when time permits.
         */
        Provider<KernelAPI> kernelProvider = new Provider<KernelAPI>() {
            @Override
            public KernelAPI instance() {
                return kernel;
            }
        };

        ConstraintIndexCreator constraintIndexCreator =
                new ConstraintIndexCreator(kernelProvider, indexingService);

        LegacyIndexStore legacyIndexStore =
                new LegacyIndexStore(config, indexConfigStore, kernelProvider, legacyIndexProviderLookup);

        StatementOperationParts statementOperations = buildStatementOperations(
                storeLayer,
                legacyPropertyTrackers,
                constraintIndexCreator,
                updateableSchemaState,
                guard,
                legacyIndexStore);

        kernelTransactions = life.add(new KernelTransactions(
                neoStoreTransactionContextSupplier,
                neoStore,
                locks,
                integrityValidator,
                constraintIndexCreator,
                indexingService,
                labelScanStore,
                statementOperations,
                updateableSchemaState,
                schemaWriteGuard,
                providerMap,
                transactionHeaderInformationFactory,
                persistenceCache,
                storeLayer,
                transactionCommitProcess,
                indexConfigStore,
                legacyIndexProviderLookup,
                hooks,
                transactionMonitor,
                life,
                readOnly));

        kernel = new Kernel(statisticsService, kernelTransactions, hooks, kernelHealth, transactionMonitor);

        life.add(logFile);
        life.add(logicalTransactionStore);
        life.add(new LifecycleAdapter() {
            @Override
            public void start() throws Throwable {
                startupStatistics.setNumberOfRecoveredTransactions(recoveredCount.get());
                recoveredCount.set(0);
                loadSchemaCache();
            }
        });
        life.add(statisticsService);
        life.add(new LifecycleAdapter() {
            @Override
            public void start() {
                neoStore.makeStoreOk();
            }
        });
        life.add(indexingService);
        life.add(labelScanStore);

        kernel.registerTransactionHook(transactionEventHandlers);

        neoStore.setRecoveredStatus(true);
        try {
            life.start();
        } finally {
            neoStore.setRecoveredStatus(false);
        }

        propertyKeyTokenHolder.addTokens(neoStore.getPropertyKeyTokenStore().getTokens(Integer.MAX_VALUE));
        relationshipTypeTokens.addTokens(neoStore.getRelationshipTypeTokenStore().getTokens(Integer.MAX_VALUE));
        labelTokens.addTokens(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    } catch (Throwable e) {
        // Something unexpected happened during startup
        try {
            // Close the neostore, so that locks are released properly
            neoStore.close();
        } catch (Exception closeException) {
            msgLog.logMessage("Couldn't close neostore after startup failure");
        }
        throw Exceptions.launderedException(e);
    }
}