// Starts the ZooKeeper-backed cluster member: notifies binding listeners of the local cluster
// address, refreshes the master from ZooKeeper, then announces the elected coordinator and the
// master's availability to the ClusterMemberListeners.
@Override
 public void start() throws Throwable {
   life.start(); // Binds ZooClient so we have a local address to use next
   Listeners.notifyListeners(
       bindingListeners,
       new Listeners.Notification<BindingListener>() {
         @Override
         public void notify(BindingListener listener) {
           listener.listeningAt(URI.create(client.getClusterServer()));
         }
       });
   client.refreshMasterFromZooKeeper();
   Listeners.notifyListeners(
       haListeners,
       new Listeners.Notification<ClusterMemberListener>() {
         @Override
         public void notify(ClusterMemberListener listener) {
           listener.coordinatorIsElected(new InstanceId(client.getCachedMaster().getMachineId()));
         }
       });
   Listeners.notifyListeners(
       haListeners,
       new Listeners.Notification<ClusterMemberListener>() {
         @Override
         public void notify(ClusterMemberListener listener) {
           listener.memberIsAvailable(
               HighAvailabilityModeSwitcher.MASTER,
               new InstanceId(client.getCachedMaster().getMachineId()),
               URI.create("ha://" + client.getCachedMaster().getServerAsString()));
         }
       });
 }
Example #2
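  // Starts HA slave communication: builds the MasterClient, SlaveImpl and SlaveServer, registers
  // them with the haCommunicationLife LifeSupport, starts it, and announces the slave's HA URI.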
  private URI startHaCommunication(
      LifeSupport haCommunicationLife,
      NeoStoreXaDataSource nioneoDataSource,
      URI me,
      URI masterUri) {
    MasterClient master =
        newMasterClient(masterUri, nioneoDataSource.getStoreId(), haCommunicationLife);

    Slave slaveImpl =
        new SlaveImpl(
            nioneoDataSource.getStoreId(), resolver.resolveDependency(UpdatePuller.class));

    SlaveServer server =
        new SlaveServer(
            slaveImpl, serverConfig(), logging, resolver.resolveDependency(Monitors.class));

    masterDelegateHandler.setDelegate(master);

    haCommunicationLife.add(slaveImpl);
    haCommunicationLife.add(server);
    haCommunicationLife.start();

    URI slaveHaURI = createHaURI(me, server);
    clusterMemberAvailability.memberIsAvailable(HighAvailabilityModeSwitcher.SLAVE, slaveHaURI);

    return slaveHaURI;
  }
Example #3
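  // BatchingNeoStore constructor: adds a channel-reusing file system to the internal LifeSupport,
  // builds the batch-writing NeoStore and its token repositories, then starts the lifecycle.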
  public BatchingNeoStore(
      FileSystemAbstraction fileSystem,
      String storeDir,
      Configuration config,
      Monitor writeMonitor,
      Logging logging,
      WriterFactory writerFactory,
      Monitors monitors) {
    this.config = config;
    this.writeMonitor = writeMonitor;
    this.writerFactory = writerFactory;
    this.monitors = monitors;
    this.fileSystem = life.add(new ChannelReusingFileSystemAbstraction(fileSystem));

    this.logger = logging.getMessagesLog(getClass());
    this.neo4jConfig =
        configForStoreDir(
            new Config(
                stringMap(dense_node_threshold.name(), valueOf(config.denseNodeThreshold())),
                GraphDatabaseSettings.class),
            new File(storeDir));

    this.neoStore = newBatchWritingNeoStore();
    this.propertyKeyRepository =
        new BatchingPropertyKeyTokenRepository(neoStore.getPropertyKeyTokenStore());
    this.labelRepository = new BatchingLabelTokenRepository(neoStore.getLabelTokenStore());
    this.relationshipTypeRepository =
        new BatchingRelationshipTypeTokenRepository(neoStore.getRelationshipTypeTokenStore());
    life.start();
  }
Example #4
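  // Copies the store from the master: a temporary LifeSupport owns the MasterClient for the
  // duration of the copy and is stopped in the finally block regardless of outcome.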
  private void copyStoreFromMaster(URI masterUri) throws Throwable {
    FileSystemAbstraction fs = resolver.resolveDependency(FileSystemAbstraction.class);
    // Must be called under lock on XaDataSourceManager
    LifeSupport life = new LifeSupport();
    try {
      // Remove the current store - neostore file is missing, nothing we can really do
      //            stopServicesAndHandleBranchedStore( BranchedDataPolicy.keep_none );
      final MasterClient copyMaster = newMasterClient(masterUri, null, life);
      life.start();

      // This will move the copied db to the graphdb location
      console.log("Copying store from master");
      new StoreCopyClient(config, kernelExtensions, console, fs)
          .copyStore(
              new StoreCopyClient.StoreCopyRequester() {
                @Override
                public Response<?> copyStore(StoreWriter writer) {
                  return copyMaster.copyStore(
                      new RequestContext(
                          0, config.get(ClusterSettings.server_id).toIntegerIndex(), 0, 0, 0, 0),
                      writer);
                }

                @Override
                public void done() { // Nothing to clean up here
                }
              });

      startServicesAgain();
      console.log("Finished copying store from master");
    } finally {
      life.stop();
    }
  }
Example #5
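 // Registers the Lucene index provider: the LuceneDataSource is added to the LifeSupport and
 // started immediately (note the TODO about doing proper lifecycle management elsewhere).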
 @Override
 public void init() {
   luceneDataSource = life.add(new LuceneDataSource(config, indexStore, fileSystemAbstraction));
   // TODO Don't do this here, do proper life cycle management
   life.start();
   LuceneIndexImplementation indexImplementation = new LuceneIndexImplementation(luceneDataSource);
   indexProviders.registerIndexProvider(
       LuceneIndexImplementation.SERVICE_NAME, indexImplementation);
 }
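Example #6
  // Messaging test: two Server instances configured as a two-member cluster share one LifeSupport;
  // after life.start(), a message sent from server1 should be processed by both servers.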
  @Test
  public void shouldSendAMessageFromAClientWhichIsReceivedByAServer() throws Exception {

    // given

    CountDownLatch latch = new CountDownLatch(1);

    LifeSupport life = new LifeSupport();

    Server server1 =
        new Server(
            latch,
            MapUtil.stringMap(
                ClusterSettings.cluster_server.name(),
                "localhost:1234",
                ClusterSettings.server_id.name(),
                "1",
                ClusterSettings.initial_hosts.name(),
                "localhost:1234,localhost:1235"));

    life.add(server1);

    Server server2 =
        new Server(
            latch,
            MapUtil.stringMap(
                ClusterSettings.cluster_server.name(),
                "localhost:1235",
                ClusterSettings.server_id.name(),
                "2",
                ClusterSettings.initial_hosts.name(),
                "localhost:1234,localhost:1235"));

    life.add(server2);

    life.start();

    // when

    server1.process(
        Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World"));

    // then

    latch.await(5, TimeUnit.SECONDS);

    assertTrue("server1 should have processed the message", server1.processedMessage());
    assertTrue("server2 should have processed the message", server2.processedMessage());

    life.shutdown();
  }
Example #7
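  // Builds a ManagedCluster for every configured cluster, adds each to a fresh LifeSupport,
  // and starts them all at once.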
  @Override
  public void start() throws Throwable {
    Clusters clusters = clustersProvider.clusters();

    life = new LifeSupport();

    for (int i = 0; i < clusters.getClusters().size(); i++) {
      Clusters.Cluster cluster = clusters.getClusters().get(i);
      ManagedCluster managedCluster = new ManagedCluster(cluster);
      clusterMap.put(cluster.getName(), managedCluster);
      life.add(managedCluster);
    }

    life.start();
  }
Example #8
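 // Starts all registered XA data sources via a new LifeSupport, then notifies each
 // DataSourceRegistrationListener of every data source.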
 @Override
 public void start() throws Throwable {
   life = new LifeSupport();
   for (XaDataSource ds : dataSources.values()) {
     life.add(ds);
   }
   life.start();
   for (DataSourceRegistrationListener listener : dsRegistrationListeners) {
     try {
       for (XaDataSource ds : dataSources.values()) {
         listener.registeredDataSource(ds);
       }
     } catch (Throwable t) {
       msgLog.logMessage("Failed when notifying registering listener", t);
     }
   }
 }
Example #9
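  // Variant of the cluster manager start-up: the LifeSupport is started first, so each
  // ManagedCluster added afterwards is started immediately.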
  @Override
  public void start() throws Throwable {
    Clusters clusters = clustersProvider.clusters();

    life = new LifeSupport();

    // Started so instances added here will be started immediately, and in case of exceptions
    // they can be shutdown() or stop()ped properly.
    life.start();

    for (int i = 0; i < clusters.getClusters().size(); i++) {
      Clusters.Cluster cluster = clusters.getClusters().get(i);
      ManagedCluster managedCluster = new ManagedCluster(cluster);
      clusterMap.put(cluster.getName(), managedCluster);
      life.add(managedCluster);
    }
  }
Example #10
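 // Standalone cluster client constructor: logging and the ClusterClient are added to the
 // LifeSupport, a shutdown hook is registered, and the lifecycle is started.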
 private StandaloneClusterClient(Logging logging, ClusterClient clusterClient) {
   life.add(logging);
   life.add(clusterClient);
   addShutdownHook();
   life.start();
 }
Example #11
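  // NeoStoreXaDataSource start-up: builds the store, caches, indexing service, transaction log
  // infrastructure and kernel, adds them to the LifeSupport, and starts it with the recovered
  // status flag set; the neostore is closed if start-up fails.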
  @Override
  public void start() throws IOException {
    life = new LifeSupport();
    readOnly = config.get(Configuration.read_only);
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    if (!storeFactory.storeExists()) {
      storeFactory.createNeoStore().close();
    }
    indexProvider =
        dependencyResolver.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    storeMigrationProcess.addParticipant(indexProvider.storeMigrationParticipant());
    // TODO: Build a real provider map
    final DefaultSchemaIndexProviderMap providerMap =
        new DefaultSchemaIndexProviderMap(indexProvider);
    storeMigrationProcess.migrateIfNeeded(store.getParentFile());
    neoStore = dependencies.satisfyDependency(storeFactory.newNeoStore(false));
    dependencies.satisfyDependency(TransactionIdStore.class, neoStore);

    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    nodeCache = new AutoLoadingCache<>(cacheProvider.node(), nodeLoader(neoStore.getNodeStore()));
    relationshipCache =
        new AutoLoadingCache<>(
            cacheProvider.relationship(), relationshipLoader(neoStore.getRelationshipStore()));
    RelationshipLoader relationshipLoader =
        new RelationshipLoader(relationshipCache, new RelationshipChainLoader(neoStore));
    PersistenceCache persistenceCache =
        new PersistenceCache(
            nodeCache,
            relationshipCache,
            nodeManager,
            relationshipLoader,
            propertyKeyTokenHolder,
            relationshipTypeTokens,
            labelTokens);
    CacheAccessBackDoor cacheAccess =
        new BridgingCacheAccess(schemaCache, updateableSchemaState, persistenceCache);
    try {
      indexingService =
          new IndexingService(
              scheduler,
              providerMap,
              new NeoStoreIndexStoreView(lockService, neoStore),
              tokenNameLookup,
              updateableSchemaState,
              indexRuleLoader(),
              logging,
              indexingServiceMonitor); // TODO 2.2-future What index rules should be
      final IntegrityValidator integrityValidator =
          new IntegrityValidator(neoStore, indexingService);
      labelScanStore =
          dependencyResolver
              .resolveDependency(
                  LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
              .getLabelScanStore();
      fileListing =
          new NeoStoreFileListing(
              storeDir, labelScanStore, indexingService, legacyIndexProviderLookup);
      Provider<NeoStore> neoStoreProvider =
          new Provider<NeoStore>() {
            @Override
            public NeoStore instance() {
              return getNeoStore();
            }
          };

      if (config.get(GraphDatabaseSettings.cache_type).equals(CacheLayer.EXPERIMENTAL_OFF)) {
        storeLayer =
            new DiskLayer(
                propertyKeyTokenHolder,
                labelTokens,
                relationshipTypeTokens,
                new SchemaStorage(neoStore.getSchemaStore()),
                neoStoreProvider,
                indexingService);
      } else {
        storeLayer =
            new CacheLayer(
                new DiskLayer(
                    propertyKeyTokenHolder,
                    labelTokens,
                    relationshipTypeTokens,
                    new SchemaStorage(neoStore.getSchemaStore()),
                    neoStoreProvider,
                    indexingService),
                persistenceCache,
                indexingService,
                schemaCache);
      }

      LegacyPropertyTrackers legacyPropertyTrackers =
          new LegacyPropertyTrackers(
              propertyKeyTokenHolder,
              nodeManager.getNodePropertyTrackers(),
              nodeManager.getRelationshipPropertyTrackers(),
              nodeManager);
      StatisticsService statisticsService =
          new StatisticsServiceRepository(fs, config, storeLayer, scheduler).loadStatistics();
      final NeoStoreTransactionContextSupplier neoStoreTransactionContextSupplier =
          new NeoStoreTransactionContextSupplier(neoStore);

      final TransactionHooks hooks = new TransactionHooks();
      File directory = config.get(GraphDatabaseSettings.store_dir);
      TransactionMetadataCache transactionMetadataCache =
          new TransactionMetadataCache(1000, 100_000);
      PhysicalLogFiles logFiles = new PhysicalLogFiles(directory, PhysicalLogFile.DEFAULT_NAME, fs);

      LogFileInformation logFileInformation =
          dependencies.satisfyDependency(
              LogFileInformation.class,
              new PhysicalLogFileInformation(
                  logFiles,
                  transactionMetadataCache,
                  neoStore,
                  new PhysicalLogFileInformation.SPI() {
                    @Override
                    public long getTimestampForVersion(long version) throws IOException {
                      LogPosition position = new LogPosition(version, LOG_HEADER_SIZE);
                      try (ReadableLogChannel channel = logFile.getReader(position)) {
                        LogEntryReader<ReadableLogChannel> reader =
                            new VersionAwareLogEntryReader();
                        LogEntry entry;
                        while ((entry = reader.readLogEntry(channel)) != null) {
                          if (entry instanceof LogEntryStart) {
                            return ((LogEntryStart) entry).getTimeWritten();
                          }
                        }
                      }
                      return -1;
                    }
                  }));

      LogPruneStrategy logPruneStrategy =
          LogPruneStrategyFactory.fromConfigValue(
              fs,
              logFileInformation,
              logFiles,
              neoStore,
              config.get(GraphDatabaseSettings.keep_logical_logs));

      final TransactionRepresentationStoreApplier storeApplier =
          dependencies.satisfyDependency(
              new TransactionRepresentationStoreApplier(
                  indexingService,
                  labelScanStore,
                  neoStore,
                  cacheAccess,
                  lockService,
                  legacyIndexProviderLookup,
                  indexConfigStore,
                  DEFAULT_HIGH_ID_TRACKING));

      LoggingLogFileMonitor logMonitor =
          new LoggingLogFileMonitor(logging.getMessagesLog(getClass()));
      RecoveryVisitor recoveryVisitor =
          new RecoveryVisitor(neoStore, storeApplier, recoveredCount, logMonitor);
      Visitor<ReadableLogChannel, IOException> logFileRecoverer =
          new LogFileRecoverer(new VersionAwareLogEntryReader(), recoveryVisitor);
      logFile =
          dependencies.satisfyDependency(
              new PhysicalLogFile(
                  fs,
                  logFiles,
                  config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
                  logPruneStrategy,
                  neoStore,
                  neoStore,
                  logMonitor,
                  this,
                  transactionMetadataCache,
                  logFileRecoverer));

      final LogicalTransactionStore logicalTransactionStore =
          dependencies.satisfyDependency(
              LogicalTransactionStore.class,
              new PhysicalLogicalTransactionStore(
                  logFile,
                  txIdGenerator,
                  transactionMetadataCache,
                  neoStore,
                  config.get(GraphDatabaseSettings.batched_writes)));

      TransactionCommitProcess transactionCommitProcess =
          dependencies.satisfyDependency(
              TransactionCommitProcess.class,
              commitProcessFactory.create(
                  logicalTransactionStore,
                  kernelHealth,
                  neoStore,
                  storeApplier,
                  new NeoStoreInjectedTransactionValidator(integrityValidator),
                  false));

      /*
       * This is used by legacy indexes and constraint indexes whenever a transaction is to be spawned
       * from within an existing transaction. It smells, and we should look over alternatives when time permits.
       */
      Provider<KernelAPI> kernelProvider =
          new Provider<KernelAPI>() {
            @Override
            public KernelAPI instance() {
              return kernel;
            }
          };

      ConstraintIndexCreator constraintIndexCreator =
          new ConstraintIndexCreator(kernelProvider, indexingService);

      LegacyIndexStore legacyIndexStore =
          new LegacyIndexStore(config, indexConfigStore, kernelProvider, legacyIndexProviderLookup);

      StatementOperationParts statementOperations =
          buildStatementOperations(
              storeLayer,
              legacyPropertyTrackers,
              constraintIndexCreator,
              updateableSchemaState,
              guard,
              legacyIndexStore);

      kernelTransactions =
          life.add(
              new KernelTransactions(
                  neoStoreTransactionContextSupplier,
                  neoStore,
                  locks,
                  integrityValidator,
                  constraintIndexCreator,
                  indexingService,
                  labelScanStore,
                  statementOperations,
                  updateableSchemaState,
                  schemaWriteGuard,
                  providerMap,
                  transactionHeaderInformationFactory,
                  persistenceCache,
                  storeLayer,
                  transactionCommitProcess,
                  indexConfigStore,
                  legacyIndexProviderLookup,
                  hooks,
                  transactionMonitor,
                  life,
                  readOnly));

      kernel =
          new Kernel(
              statisticsService, kernelTransactions, hooks, kernelHealth, transactionMonitor);

      life.add(logFile);
      life.add(logicalTransactionStore);
      life.add(
          new LifecycleAdapter() {
            @Override
            public void start() throws Throwable {
              startupStatistics.setNumberOfRecoveredTransactions(recoveredCount.get());
              recoveredCount.set(0);
              loadSchemaCache();
            }
          });
      life.add(statisticsService);
      life.add(
          new LifecycleAdapter() {
            @Override
            public void start() {
              neoStore.makeStoreOk();
            }
          });
      life.add(indexingService);
      life.add(labelScanStore);

      kernel.registerTransactionHook(transactionEventHandlers);
      neoStore.setRecoveredStatus(true);
      try {
        life.start();
      } finally {
        neoStore.setRecoveredStatus(false);
      }

      propertyKeyTokenHolder.addTokens(
          neoStore.getPropertyKeyTokenStore().getTokens(Integer.MAX_VALUE));
      relationshipTypeTokens.addTokens(
          neoStore.getRelationshipTypeTokenStore().getTokens(Integer.MAX_VALUE));
      labelTokens.addTokens(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    } catch (Throwable e) { // Something unexpected happened during startup
      try { // Close the neostore, so that locks are released properly
        neoStore.close();
      } catch (Exception closeException) {
        msgLog.logMessage("Couldn't close neostore after startup failure");
      }
      throw Exceptions.launderedException(e);
    }
  }
    @Override
    public void start() throws Throwable {

      life.start();
    }
 @Override
 public synchronized void start() throws Throwable {
   haCommunicationLife.start();
 }
Example #14
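  // BatchInserterImpl constructor: adds the job scheduler, LifecycledPageCache, IndexConfigStore
  // and kernel extensions to the LifeSupport, starts it, then resolves the schema index provider
  // and label scan store.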
  BatchInserterImpl(
      String storeDir,
      FileSystemAbstraction fileSystem,
      Map<String, String> stringParams,
      Iterable<KernelExtensionFactory<?>> kernelExtensions) {
    rejectAutoUpgrade(stringParams);
    Map<String, String> params = getDefaultParams();
    params.putAll(stringParams);
    config =
        StoreFactory.configForStoreDir(
            new Config(params, GraphDatabaseSettings.class), new File(storeDir));

    life = new LifeSupport();
    this.fileSystem = fileSystem;
    this.storeDir = new File(FileUtils.fixSeparatorsInPath(storeDir));
    Neo4jJobScheduler jobScheduler = life.add(new Neo4jJobScheduler());
    LifecycledPageCache pageCache =
        life.add(new LifecycledPageCache(fileSystem, jobScheduler, config));

    msgLog = StringLogger.loggerDirectory(fileSystem, this.storeDir);
    logging = new SingleLoggingService(msgLog);
    storeLocker = new StoreLocker(fileSystem);
    storeLocker.checkLock(this.storeDir);

    boolean dump = config.get(GraphDatabaseSettings.dump_configuration);
    this.idGeneratorFactory = new DefaultIdGeneratorFactory();

    Monitors monitors = new Monitors();
    StoreFactory sf =
        new StoreFactory(config, idGeneratorFactory, pageCache, fileSystem, msgLog, monitors);

    if (dump) {
      dumpConfiguration(params);
    }
    msgLog.logMessage(Thread.currentThread() + " Starting BatchInserter(" + this + ")");
    neoStore = sf.newNeoStore(true);
    if (!neoStore.isStoreOk()) {
      throw new IllegalStateException(storeDir + " store is not cleanly shutdown.");
    }
    neoStore.makeStoreOk();
    Token[] indexes = getPropertyKeyTokenStore().getTokens(10000);
    propertyKeyTokens = new BatchTokenHolder(indexes);
    labelTokens = new BatchTokenHolder(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    Token[] types = getRelationshipTypeStore().getTokens(Integer.MAX_VALUE);
    relationshipTypeTokens = new BatchTokenHolder(types);
    indexStore = life.add(new IndexConfigStore(this.storeDir, fileSystem));
    schemaCache = new SchemaCache(neoStore.getSchemaStore());

    KernelExtensions extensions =
        life.add(
            new KernelExtensions(
                kernelExtensions,
                config,
                new DependencyResolverImpl(),
                UnsatisfiedDependencyStrategies.ignore()));

    life.start();

    SchemaIndexProvider provider =
        extensions.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    schemaIndexProviders = new DefaultSchemaIndexProviderMap(provider);
    labelScanStore =
        life.add(
            extensions
                .resolveDependency(
                    LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                .getLabelScanStore());
    actions = new BatchSchemaActions();

    // Record access
    recordAccess = new DirectRecordAccessSet(neoStore);
    relationshipCreator =
        new RelationshipCreator(
            RelationshipLocker.NO_LOCKING,
            new RelationshipGroupGetter(neoStore.getRelationshipGroupStore()),
            neoStore.getDenseNodeThreshold());
    propertyTraverser = new PropertyTraverser();
    propertyCreator = new PropertyCreator(getPropertyStore(), propertyTraverser);
    propertyDeletor = new PropertyDeleter(getPropertyStore(), propertyTraverser);
  }
Example #15
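  // XA-era NeoStoreXaDataSource start-up: builds caches, the indexing service, the XaContainer
  // and the kernel, calls life.init(), initializes indexes and opens the logical log (when not
  // read-only) under recovered status, then starts the lifecycle; the neostore is closed on failure.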
  @Override
  public void start() throws IOException {
    life = new LifeSupport();

    readOnly = config.get(Configuration.read_only);

    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    storeFactory.ensureStoreExists();

    final TransactionFactory tf;
    if (providers.shouldInterceptCommitting()) {
      tf = new InterceptingTransactionFactory();
    } else {
      tf = new TransactionFactory();
    }
    neoStore = storeFactory.newNeoStore(store);

    neoStoreTransactionContextSupplier = new NeoStoreTransactionContextSupplier(neoStore);

    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    final NodeManager nodeManager = dependencyResolver.resolveDependency(NodeManager.class);
    Iterator<? extends Cache<?>> caches = nodeManager.caches().iterator();
    persistenceCache =
        new PersistenceCache(
            (AutoLoadingCache<NodeImpl>) caches.next(),
            (AutoLoadingCache<RelationshipImpl>) caches.next(),
            new Thunk<GraphPropertiesImpl>() {
              @Override
              public GraphPropertiesImpl evaluate() {
                return nodeManager.getGraphProperties();
              }
            },
            nodeManager);
    cacheAccess =
        new BridgingCacheAccess(nodeManager, schemaCache, updateableSchemaState, persistenceCache);

    try {
      indexProvider =
          dependencyResolver.resolveDependency(
              SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);

      // TODO: Build a real provider map
      DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(indexProvider);

      indexingService =
          life.add(
              new IndexingService(
                  scheduler,
                  providerMap,
                  new NeoStoreIndexStoreView(locks, neoStore),
                  tokenNameLookup,
                  updateableSchemaState,
                  logging));

      integrityValidator = new IntegrityValidator(neoStore, indexingService);

      xaContainer =
          xaFactory.newXaContainer(
              this,
              config.get(Configuration.logical_log),
              new CommandFactory(neoStore, indexingService),
              new NeoStoreInjectedTransactionValidator(integrityValidator),
              tf,
              stateFactory,
              providers,
              readOnly);

      labelScanStore =
          life.add(
              dependencyResolver
                  .resolveDependency(
                      LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                  .getLabelScanStore());

      fileListing = new NeoStoreFileListing(xaContainer, storeDir, labelScanStore, indexingService);

      kernel =
          life.add(
              new Kernel(
                  txManager,
                  propertyKeyTokens,
                  labelTokens,
                  relationshipTypeTokens,
                  persistenceManager,
                  lockManager,
                  updateableSchemaState,
                  schemaWriteGuard,
                  indexingService,
                  nodeManager,
                  neoStore,
                  persistenceCache,
                  schemaCache,
                  providerMap,
                  labelScanStore,
                  readOnly));

      life.init();

      // TODO: Why isn't this done in the init() method of the indexing service?
      if (!readOnly) {
        neoStore.setRecoveredStatus(true);
        try {
          indexingService.initIndexes(loadIndexRules());
          xaContainer.openLogicalLog();
        } finally {
          neoStore.setRecoveredStatus(false);
        }
      }
      if (!xaContainer.getResourceManager().hasRecoveredTransactions()) {
        neoStore.makeStoreOk();
      } else {
        msgLog.debug("Waiting for TM to take care of recovered transactions.");
      }
      idGenerators = new ArrayMap<>((byte) 5, false, false);
      this.idGenerators.put(Node.class, neoStore.getNodeStore());
      this.idGenerators.put(Relationship.class, neoStore.getRelationshipStore());
      this.idGenerators.put(RelationshipType.class, neoStore.getRelationshipTypeStore());
      this.idGenerators.put(Label.class, neoStore.getLabelTokenStore());
      this.idGenerators.put(PropertyStore.class, neoStore.getPropertyStore());
      this.idGenerators.put(
          PropertyKeyTokenRecord.class, neoStore.getPropertyStore().getPropertyKeyTokenStore());
      setLogicalLogAtCreationTime(xaContainer.getLogicalLog());

      life.start();
    } catch (Throwable e) { // Something unexpected happened during startup
      try { // Close the neostore, so that locks are released properly
        neoStore.close();
      } catch (Exception closeException) {
        msgLog.logMessage("Couldn't close neostore after startup failure");
      }
      throw Exceptions.launderedException(e);
    }
  }
Example #16
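  // Writes a backup's transaction stream into the active log file: a throw-away LifeSupport
  // manages the PhysicalLogFile, transactions are appended manually via TransactionLogWriter,
  // and the log header is rewritten with the correct previous transaction id.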
  private void writeTransactionsToActiveLogFile(File storeDir, Response<?> response)
      throws IOException {
    LifeSupport life = new LifeSupport();
    try {
      // Start the log and appender
      PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
      TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(10, 100);
      ReadOnlyLogVersionRepository logVersionRepository =
          new ReadOnlyLogVersionRepository(fs, storeDir);
      LogFile logFile =
          life.add(
              new PhysicalLogFile(
                  fs,
                  logFiles,
                  Long.MAX_VALUE /*don't rotate*/,
                  NO_PRUNING,
                  new ReadOnlyTransactionIdStore(fs, storeDir),
                  logVersionRepository,
                  new Monitors().newMonitor(PhysicalLogFile.Monitor.class),
                  LogRotationControl.NO_ROTATION_CONTROL,
                  transactionMetadataCache,
                  new NoRecoveryAssertingVisitor()));
      life.start();

      // Just write all transactions to the active log version. Remember that this is after a store copy
      // where there are no logs, and the transaction stream we're about to write will probably contain
      // transactions that go some time back, before the last committed transaction id. So we cannot
      // use a TransactionAppender, since it has checks for which transactions one can append.
      WritableLogChannel channel = logFile.getWriter();
      final TransactionLogWriter writer =
          new TransactionLogWriter(new LogEntryWriterv1(channel, new CommandWriter(channel)));
      final AtomicLong firstTxId = new AtomicLong(-1);
      response.accept(
          new Visitor<CommittedTransactionRepresentation, IOException>() {
            @Override
            public boolean visit(CommittedTransactionRepresentation transaction)
                throws IOException {
              long txId = transaction.getCommitEntry().getTxId();
              writer.append(transaction.getTransactionRepresentation(), txId);
              firstTxId.compareAndSet(-1, txId);
              return true;
            }
          });

      // And since we write this manually we need to set the correct transaction id in the
      // header of the log that we just wrote.
      writeLogHeader(
          fs,
          logFiles.getLogFileForVersion(logVersionRepository.getCurrentLogVersion()),
          logVersionRepository.getCurrentLogVersion(),
          firstTxId.get() != -1 ? firstTxId.get() - 1 : 0);

      if (firstTxId.get() == -1) {
        console.warn(
            "Important: There are no available transaction logs on the target database, which "
                + "means the backup could not save a point-in-time reference. This means you cannot use this "
                + "backup for incremental backups, and it means you cannot use it directly to seed an HA "
                + "cluster. The next time you perform a backup, a full backup will be done. If you wish to "
                + "use this backup as a seed for a cluster, you need to start a stand-alone database on "
                + "it, and commit one write transaction, to create the transaction log needed to seed the "
                + "cluster. To avoid this happening, make sure you never manually delete transaction log "
                + "files ("
                + PhysicalLogFile.DEFAULT_NAME
                + PhysicalLogFile.DEFAULT_VERSION_SUFFIX
                + "XXX), "
                + "and that you configure the database to keep at least a few days worth of transaction logs.");
      }
    } finally {
      life.shutdown();
    }
  }
Example #17
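  // Modular data source start-up: builds the neostore, cache, indexing, store-layer,
  // transaction-log and kernel modules, satisfies their dependencies, then starts the
  // LifeSupport; the neostore is closed if either phase fails.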
  @Override
  public void start() throws IOException {
    dependencies = new Dependencies();
    life = new LifeSupport();
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    if (!storeFactory.storeExists()) {
      storeFactory.createNeoStore().close();
    }

    indexProvider =
        dependencyResolver.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);

    // Monitor listeners
    LoggingLogFileMonitor loggingLogMonitor =
        new LoggingLogFileMonitor(logging.getMessagesLog(getClass()));
    monitors.addMonitorListener(loggingLogMonitor);
    monitors.addMonitorListener(
        new RecoveryVisitor.Monitor() {
          @Override
          public void transactionRecovered(long txId) {
            recoveredCount.incrementAndGet();
          }
        });

    // Upgrade the store before we begin
    upgradeStore(store, storeMigrationProcess, indexProvider);

    // Build all modules and their services
    try {
      final NeoStoreModule neoStoreModule =
          buildNeoStore(storeFactory, labelTokens, relationshipTypeTokens, propertyKeyTokenHolder);
      // TODO The only reason this is here is because of the provider-stuff for DiskLayer.
      // Remove when possible.
      this.neoStoreModule = neoStoreModule;

      CacheModule cacheModule =
          buildCaches(
              neoStoreModule.neoStore(),
              cacheProvider,
              nodeManager,
              labelTokens,
              relationshipTypeTokens,
              propertyKeyTokenHolder);

      IndexingModule indexingModule =
          buildIndexing(
              config,
              scheduler,
              indexProvider,
              lockService,
              tokenNameLookup,
              logging,
              indexingServiceMonitor,
              neoStoreModule.neoStore(),
              cacheModule.updateableSchemaState());

      StoreLayerModule storeLayerModule =
          buildStoreLayer(
              config,
              neoStoreModule.neoStore(),
              cacheModule.persistenceCache(),
              propertyKeyTokenHolder,
              labelTokens,
              relationshipTypeTokens,
              indexingModule.indexingService(),
              cacheModule.schemaCache());

      TransactionLogModule transactionLogModule =
          buildTransactionLogs(
              config,
              logging,
              indexingModule.labelScanStore(),
              fs,
              neoStoreModule.neoStore(),
              cacheModule.cacheAccess(),
              indexingModule.indexingService(),
              indexProviders.values());

      buildRecovery(
          fs,
          cacheModule.cacheAccess(),
          indexingModule.indexingService(),
          indexingModule.labelScanStore(),
          neoStoreModule.neoStore(),
          monitors.newMonitor(RecoveryVisitor.Monitor.class),
          monitors.newMonitor(Recovery.Monitor.class),
          transactionLogModule.logFiles(),
          transactionLogModule.logRotationControl(),
          startupStatistics);

      KernelModule kernelModule =
          buildKernel(
              indexingModule.integrityValidator(),
              transactionLogModule.logicalTransactionStore(),
              neoStoreModule.neoStore(),
              transactionLogModule.storeApplier(),
              indexingModule.indexingService(),
              storeLayerModule.storeLayer(),
              cacheModule.updateableSchemaState(),
              indexingModule.labelScanStore(),
              cacheModule.persistenceCache(),
              indexingModule.schemaIndexProviderMap());

      // Do these assignments last so that we can ensure no cyclical dependencies exist
      this.cacheModule = cacheModule;
      this.indexingModule = indexingModule;
      this.storeLayerModule = storeLayerModule;
      this.transactionLogModule = transactionLogModule;
      this.kernelModule = kernelModule;

      dependencies.satisfyDependency(this);
      satisfyDependencies(
          neoStoreModule,
          cacheModule,
          indexingModule,
          storeLayerModule,
          transactionLogModule,
          kernelModule);
    } catch (Throwable e) { // Something unexpected happened during startup
      try { // Close the neostore, so that locks are released properly
        neoStoreModule.neoStore().close();
      } catch (Exception closeException) {
        msgLog.logMessage("Couldn't close neostore after startup failure");
      }
      throw Exceptions.launderedException(e);
    }

    try {
      life.start();
    } catch (Throwable e) { // Something unexpected happened during startup

      try { // Close the neostore, so that locks are released properly
        neoStoreModule.neoStore().close();
      } catch (Exception closeException) {
        msgLog.logMessage("Couldn't close neostore after startup failure");
      }
      throw Exceptions.launderedException(e);
    }
  }
Example #18
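  // Consistency check against the master: a temporary LifeSupport owns the MasterClient while the
  // store is verified and updates are pulled; known failure modes trigger branched-store handling
  // before the exception is rethrown.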
  private void checkDataConsistency(
      RequestContextFactory requestContextFactory,
      NeoStoreXaDataSource nioneoDataSource,
      URI masterUri)
      throws Throwable {
    // Must be called under lock on XaDataSourceManager
    LifeSupport checkConsistencyLife = new LifeSupport();
    TransactionIdStore txIdStore = null;
    try {
      MasterClient checkConsistencyMaster =
          newMasterClient(masterUri, nioneoDataSource.getStoreId(), checkConsistencyLife);
      checkConsistencyLife.start();
      console.log("Checking store consistency with master");
      txIdStore =
          nioneoDataSource.getDependencyResolver().resolveDependency(TransactionIdStore.class);
      checkDataConsistencyWithMaster(
          masterUri, checkConsistencyMaster, nioneoDataSource, txIdStore);
      console.log("Store is consistent");

      /*
       * Pull updates, since the store seems happy and everything. No matter how far back we are, this is just
       * one thread doing the pulling, while the guard is up. This will prevent a race between all transactions
       * that may start the moment the database becomes available, where all of them will pull the same txs from
       * the master but eventually only one will get to apply them.
       */
      console.log("Catching up with master");

      resolver
          .resolveDependency(TransactionCommittingResponseUnpacker.class)
          .unpackResponse(
              checkConsistencyMaster.pullUpdates(requestContextFactory.newRequestContext()));
      console.log("Now consistent with master");
    } catch (NoSuchLogVersionException e) {
      msgLog.logMessage(
          "Cannot catch up to master by pulling updates, because I cannot find the archived "
              + "logical log file that has the transaction I would start from. I'm going to copy the whole "
              + "store from the master instead.");
      try {
        stopServicesAndHandleBranchedStore(config.get(HaSettings.branched_data_policy));
      } catch (Throwable throwable) {
        msgLog.warn("Failed preparing for copying the store from the master instance", throwable);
      }
      throw e;
    } catch (StoreUnableToParticipateInClusterException upe) {
      console.log(
          "The store is inconsistent. Will treat it as branched and fetch a new one from the master");
      msgLog.warn(
          "Current store is unable to participate in the cluster; fetching new store from master",
          upe);
      try {
        stopServicesAndHandleBranchedStore(config.get(HaSettings.branched_data_policy));
      } catch (IOException e) {
        msgLog.warn("Failed while trying to handle branched data", e);
      }

      throw upe;
    } catch (MismatchingStoreIdException e) {
      console.log(
          "The store does not represent the same database as master. Will remove and fetch a new one from master");
      if (txIdStore.getLastCommittingTransactionId() == 0) {
        msgLog.warn("Found and deleting empty store with mismatching store id " + e.getMessage());
        stopServicesAndHandleBranchedStore(BranchedDataPolicy.keep_none);
      } else {
        msgLog.error("Store cannot participate in cluster due to mismatching store IDs");
      }
      throw e;
    } finally {
      checkConsistencyLife.shutdown();
    }
  }
Example #19
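  // Earlier BatchInserterImpl constructor: locks the store directory, builds the NeoStore and
  // token holders, adds the IndexStore and kernel extensions to the LifeSupport, and starts it
  // before resolving the schema index provider and label scan store.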
  BatchInserterImpl(
      String storeDir,
      FileSystemAbstraction fileSystem,
      Map<String, String> stringParams,
      Iterable<KernelExtensionFactory<?>> kernelExtensions) {
    life = new LifeSupport();
    this.fileSystem = fileSystem;
    this.storeDir = new File(FileUtils.fixSeparatorsInPath(storeDir));

    rejectAutoUpgrade(stringParams);
    msgLog = StringLogger.loggerDirectory(fileSystem, this.storeDir);
    logging = new SingleLoggingService(msgLog);
    Map<String, String> params = getDefaultParams();
    params.put(GraphDatabaseSettings.use_memory_mapped_buffers.name(), Settings.FALSE);
    params.put(InternalAbstractGraphDatabase.Configuration.store_dir.name(), storeDir);
    params.putAll(stringParams);

    storeLocker = new StoreLocker(fileSystem);
    storeLocker.checkLock(this.storeDir);

    config = new Config(params, GraphDatabaseSettings.class);
    boolean dump = config.get(GraphDatabaseSettings.dump_configuration);
    this.idGeneratorFactory = new DefaultIdGeneratorFactory();

    StoreFactory sf =
        new StoreFactory(
            config, idGeneratorFactory, new DefaultWindowPoolFactory(), fileSystem, msgLog, null);

    File store = fixPath(this.storeDir, sf);

    if (dump) {
      dumpConfiguration(params);
    }
    msgLog.logMessage(Thread.currentThread() + " Starting BatchInserter(" + this + ")");
    neoStore = sf.newNeoStore(store);
    if (!neoStore.isStoreOk()) {
      throw new IllegalStateException(storeDir + " store is not cleanly shutdown.");
    }
    neoStore.makeStoreOk();
    Token[] indexes = getPropertyKeyTokenStore().getTokens(10000);
    propertyKeyTokens = new BatchTokenHolder(indexes);
    labelTokens = new BatchTokenHolder(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    Token[] types = getRelationshipTypeStore().getTokens(Integer.MAX_VALUE);
    relationshipTypeTokens = new BatchTokenHolder(types);
    indexStore = life.add(new IndexStore(this.storeDir, fileSystem));
    schemaCache = new SchemaCache(neoStore.getSchemaStore());

    KernelExtensions extensions =
        life.add(
            new KernelExtensions(
                kernelExtensions,
                config,
                new DependencyResolverImpl(),
                UnsatisfiedDependencyStrategies.ignore()));

    life.start();

    SchemaIndexProvider provider =
        extensions.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    schemaIndexProviders = new DefaultSchemaIndexProviderMap(provider);
    labelScanStore =
        life.add(
            extensions
                .resolveDependency(
                    LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                .getLabelScanStore());
    actions = new BatchSchemaActions();
  }