Code example #1
  private URI startHaCommunication(
      LifeSupport haCommunicationLife,
      NeoStoreXaDataSource nioneoDataSource,
      URI me,
      URI masterUri) {
    MasterClient master =
        newMasterClient(masterUri, nioneoDataSource.getStoreId(), haCommunicationLife);

    Slave slaveImpl =
        new SlaveImpl(
            nioneoDataSource.getStoreId(), resolver.resolveDependency(UpdatePuller.class));

    SlaveServer server =
        new SlaveServer(
            slaveImpl, serverConfig(), logging, resolver.resolveDependency(Monitors.class));

    masterDelegateHandler.setDelegate(master);

    haCommunicationLife.add(slaveImpl);
    haCommunicationLife.add(server);
    haCommunicationLife.start();

    URI slaveHaURI = createHaURI(me, server);
    clusterMemberAvailability.memberIsAvailable(HighAvailabilityModeSwitcher.SLAVE, slaveHaURI);

    return slaveHaURI;
  }
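All of the snippets in this list revolve around the same org.neo4j.kernel.lifecycle.LifeSupport pattern: components implementing Lifecycle are registered with add(...) and then driven through init/start/stop/shutdown as a group. A minimal, self-contained sketch of that pattern (the component here is hypothetical, not taken from any of the examples):

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.Lifecycle;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class LifeSupportSketch {
  public static void main(String[] args) {
    LifeSupport life = new LifeSupport();

    // add() returns the instance it was given, which is why the examples can
    // assign fields inline, e.g. "this.fileSystem = life.add(...)"
    Lifecycle component =
        life.add(
            new LifecycleAdapter() {
              @Override
              public void start() {
                System.out.println("component started");
              }

              @Override
              public void stop() {
                System.out.println("component stopped");
              }
            });

    life.start(); // init() and start() every registered component, in registration order
    life.shutdown(); // stop() and shutdown() them again, in reverse order
  }
}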
Code example #2
File: NeoStoreDataSource.java  Project: 0xNacho/neo4j
  private IndexingModule buildIndexing(
      Config config,
      JobScheduler scheduler,
      SchemaIndexProvider indexProvider,
      LockService lockService,
      TokenNameLookup tokenNameLookup,
      Logging logging,
      IndexingService.Monitor indexingServiceMonitor,
      NeoStore neoStore,
      UpdateableSchemaState updateableSchemaState) {
    final DefaultSchemaIndexProviderMap providerMap =
        new DefaultSchemaIndexProviderMap(indexProvider);

    final IndexingService indexingService =
        IndexingService.create(
            new IndexSamplingConfig(config),
            scheduler,
            providerMap,
            new NeoStoreIndexStoreView(lockService, neoStore),
            tokenNameLookup,
            updateableSchemaState,
            toList(new SchemaStorage(neoStore.getSchemaStore()).allIndexRules()),
            logging,
            indexingServiceMonitor);
    final IntegrityValidator integrityValidator = new IntegrityValidator(neoStore, indexingService);

    // TODO Move to constructor
    final LabelScanStore labelScanStore =
        dependencyResolver
            .resolveDependency(
                LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
            .getLabelScanStore();

    life.add(indexingService);
    life.add(labelScanStore);

    return new IndexingModule() {
      @Override
      public IndexingService indexingService() {
        return indexingService;
      }

      @Override
      public LabelScanStore labelScanStore() {
        return labelScanStore;
      }

      @Override
      public IntegrityValidator integrityValidator() {
        return integrityValidator;
      }

      @Override
      public SchemaIndexProviderMap schemaIndexProviderMap() {
        return providerMap;
      }
    };
  }
Code example #3
  @Test
  public void shouldSendAMessageFromAClientWhichIsReceivedByAServer() throws Exception {

    // given

    CountDownLatch latch = new CountDownLatch(1);

    LifeSupport life = new LifeSupport();

    Server server1 =
        new Server(
            latch,
            MapUtil.stringMap(
                ClusterSettings.cluster_server.name(),
                "localhost:1234",
                ClusterSettings.server_id.name(),
                "1",
                ClusterSettings.initial_hosts.name(),
                "localhost:1234,localhost:1235"));

    life.add(server1);

    Server server2 =
        new Server(
            latch,
            MapUtil.stringMap(
                ClusterSettings.cluster_server.name(),
                "localhost:1235",
                ClusterSettings.server_id.name(),
                "2",
                ClusterSettings.initial_hosts.name(),
                "localhost:1234,localhost:1235"));

    life.add(server2);

    life.start();

    // when

    server1.process(
        Message.to(TestMessage.helloWorld, URI.create("cluster://127.0.0.1:1235"), "Hello World"));

    // then

    latch.await(5, TimeUnit.SECONDS);

    assertTrue("server1 should have processed the message", server1.processedMessage());
    assertTrue("server2 should have processed the message", server2.processedMessage());

    life.shutdown();
  }
Code example #4
  @Override
  protected IdGeneratorFactory createIdGeneratorFactory() {
    idGeneratorFactory = new HaIdGeneratorFactory(master, logging);
    highAvailabilityModeSwitcher =
        new HighAvailabilityModeSwitcher(
            clusterClient,
            masterDelegateInvocationHandler,
            clusterMemberAvailability,
            memberStateMachine,
            this,
            (HaIdGeneratorFactory) idGeneratorFactory,
            config,
            logging,
            updateableSchemaState,
            kernelExtensions.listFactories(),
            monitors);
    /*
     * We always need the mode switcher and we need it to restart on switchover.
     */
    paxosLife.add(highAvailabilityModeSwitcher);

    /*
     * We don't really switch to master here. We just need to initialize the idGenerator so the initial store
     * can be started (if required). In any case, the rest of the database is in pending state, so nothing will
     * happen until events start arriving and that will set us to the proper state anyway.
     */
    ((HaIdGeneratorFactory) idGeneratorFactory).switchToMaster();

    return idGeneratorFactory;
  }
Code example #5
File: NeoStoreDataSource.java  Project: 0xNacho/neo4j
  private NeoStoreModule buildNeoStore(
      StoreFactory storeFactory,
      final LabelTokenHolder labelTokens,
      final RelationshipTypeTokenHolder relationshipTypeTokens,
      final PropertyKeyTokenHolder propertyKeyTokenHolder) {
    final NeoStore neoStore = storeFactory.newNeoStore(false, true);

    life.add(
        new LifecycleAdapter() {
          @Override
          public void start() {
            if (startupStatistics.numberOfRecoveredTransactions() > 0) {
              neoStore.rebuildIdGenerators();
            }
            neoStoreModule.neoStore().makeStoreOk();

            propertyKeyTokenHolder.addTokens(
                neoStoreModule.neoStore().getPropertyKeyTokenStore().getTokens(Integer.MAX_VALUE));
            relationshipTypeTokens.addTokens(
                neoStoreModule
                    .neoStore()
                    .getRelationshipTypeTokenStore()
                    .getTokens(Integer.MAX_VALUE));
            labelTokens.addTokens(
                neoStoreModule.neoStore().getLabelTokenStore().getTokens(Integer.MAX_VALUE));
          }
        });

    return new NeoStoreModule() {
      @Override
      public NeoStore neoStore() {
        return neoStore;
      }
    };
  }
Code example #6
  public BatchingNeoStore(
      FileSystemAbstraction fileSystem,
      String storeDir,
      Configuration config,
      Monitor writeMonitor,
      Logging logging,
      WriterFactory writerFactory,
      Monitors monitors) {
    this.config = config;
    this.writeMonitor = writeMonitor;
    this.writerFactory = writerFactory;
    this.monitors = monitors;
    this.fileSystem = life.add(new ChannelReusingFileSystemAbstraction(fileSystem));

    this.logger = logging.getMessagesLog(getClass());
    this.neo4jConfig =
        configForStoreDir(
            new Config(
                stringMap(dense_node_threshold.name(), valueOf(config.denseNodeThreshold())),
                GraphDatabaseSettings.class),
            new File(storeDir));

    this.neoStore = newBatchWritingNeoStore();
    this.propertyKeyRepository =
        new BatchingPropertyKeyTokenRepository(neoStore.getPropertyKeyTokenStore());
    this.labelRepository = new BatchingLabelTokenRepository(neoStore.getLabelTokenStore());
    this.relationshipTypeRepository =
        new BatchingRelationshipTypeTokenRepository(neoStore.getRelationshipTypeTokenStore());
    life.start();
  }
Code example #7
  @Override
  protected IdGeneratorFactory createIdGeneratorFactory() {

    idGeneratorFactory = new HaIdGeneratorFactory(master, memberStateMachine, logging);
    highAvailabilityModeSwitcher =
        new HighAvailabilityModeSwitcher(
            masterDelegateInvocationHandler,
            clusterMemberAvailability,
            memberStateMachine,
            this,
            (HaIdGeneratorFactory) idGeneratorFactory,
            config,
            logging);
    /*
     * We always need the mode switcher and we need it to restart on switchover. So:
     * 1) if in compatibility mode, it must be added in all 3 - to start on start and restart on switchover
     * 2) if not in compatibility mode it must be added in paxosLife, which is started anyway.
     */
    paxosLife.add(highAvailabilityModeSwitcher);
    if (compatibilityMode) {
      compatibilityLifecycle.add(1, highAvailabilityModeSwitcher);
      life.add(highAvailabilityModeSwitcher);
    }

    return idGeneratorFactory;
  }
Code example #8
    private Server(final CountDownLatch latch, final Map<String, String> config) {
      final Config conf = new Config(config, ClusterSettings.class);
      networkReceiver =
          life.add(
              new NetworkReceiver(
                  new NetworkReceiver.Configuration() {
                    @Override
                    public HostnamePort clusterServer() {
                      return conf.get(ClusterSettings.cluster_server);
                    }
                  },
                  new DevNullLoggingService()));

      networkSender =
          life.add(
              new NetworkSender(
                  new NetworkSender.Configuration() {
                    @Override
                    public int defaultPort() {
                      return 5001;
                    }

                    @Override
                    public int port() {
                      return conf.get(ClusterSettings.cluster_server).getPort();
                    }
                  },
                  networkReceiver,
                  new DevNullLoggingService()));

      life.add(
          new LifecycleAdapter() {
            @Override
            public void start() throws Throwable {
              networkReceiver.addMessageProcessor(
                  new MessageProcessor() {
                    @Override
                    public boolean process(Message<? extends MessageType> message) {
                      // server receives a message
                      processedMessage.set(true);
                      latch.countDown();
                      return true;
                    }
                  });
            }
          });
    }
Code example #9
 @Override
 public void init() throws Throwable {
   client = new ZooClient(logger.getLogger(ZooClient.class), config);
   life.add(client);
   client.addZooListener(new ZooHaEventListener());
   client.addCompatibilityModeListener(new ZooCompatibilityModeListener());
   life.init();
 }
Code example #10
 @Override
 public void init() {
   luceneDataSource = life.add(new LuceneDataSource(config, indexStore, fileSystemAbstraction));
   // TODO Don't do this here, do proper life cycle management
   life.start();
   LuceneIndexImplementation indexImplementation = new LuceneIndexImplementation(luceneDataSource);
   indexProviders.registerIndexProvider(
       LuceneIndexImplementation.SERVICE_NAME, indexImplementation);
 }
Code example #11
File: ClusterManager.java  Project: nitecoder/neo4j
  @Override
  public void start() throws Throwable {
    Clusters clusters = clustersProvider.clusters();

    life = new LifeSupport();

    for (int i = 0; i < clusters.getClusters().size(); i++) {
      Clusters.Cluster cluster = clusters.getClusters().get(i);
      ManagedCluster managedCluster = new ManagedCluster(cluster);
      clusterMap.put(cluster.getName(), managedCluster);
      life.add(managedCluster);
    }

    life.start();
  }
Code example #12
 /** Public for testing purpose. Do not use. */
 public synchronized void registerDataSource(final XaDataSource dataSource) {
   dataSources.put(dataSource.getName(), dataSource);
   branchIdMapping.put(UTF8.decode(dataSource.getBranchId()), dataSource);
   sourceIdMapping.put(dataSource.getName(), dataSource.getBranchId());
   life.add(dataSource);
   if (life.getStatus().equals(LifecycleStatus.STARTED)) {
     Listeners.notifyListeners(
         dsRegistrationListeners,
         new Listeners.Notification<DataSourceRegistrationListener>() {
           @Override
           public void notify(DataSourceRegistrationListener listener) {
             listener.registeredDataSource(dataSource);
           }
         });
   }
 }
Code example #13
 @Override
 public void start() throws Throwable {
   life = new LifeSupport();
   for (XaDataSource ds : dataSources.values()) {
     life.add(ds);
   }
   life.start();
   for (DataSourceRegistrationListener listener : dsRegistrationListeners) {
     try {
       for (XaDataSource ds : dataSources.values()) {
         listener.registeredDataSource(ds);
       }
     } catch (Throwable t) {
       msgLog.logMessage("Failed when notifying registering listener", t);
     }
   }
 }
Code example #14
File: ClusterManager.java  Project: francom/neo4j
  @Override
  public void start() throws Throwable {
    Clusters clusters = clustersProvider.clusters();

    life = new LifeSupport();

    // Started, so instances added here will be started immediately, and in case of exceptions
    // they can be shutdown() or stop()ped properly
    life.start();

    for (int i = 0; i < clusters.getClusters().size(); i++) {
      Clusters.Cluster cluster = clusters.getClusters().get(i);
      ManagedCluster managedCluster = new ManagedCluster(cluster);
      clusterMap.put(cluster.getName(), managedCluster);
      life.add(managedCluster);
    }
  }
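Note the ordering difference from code example #11 above: here life.start() runs before any ManagedCluster is added. As the comment explains, a LifeSupport that is already started brings instances up immediately as they are added (the LifecycleStatus.STARTED check in code example #12 relies on the same behavior). A small sketch of that behavior, with a hypothetical component:

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class AddAfterStartSketch {
  public static void main(String[] args) {
    LifeSupport life = new LifeSupport();
    life.start(); // nothing registered yet; the LifeSupport itself is now STARTED

    life.add(
        new LifecycleAdapter() {
          @Override
          public void start() {
            // runs right away, because the owning LifeSupport is already started
            System.out.println("started on add()");
          }
        });

    life.shutdown(); // stops and shuts down the component even though it was added late
  }
}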
Code example #15
  @Override
  protected TxHook createTxHook() {
    clusterEventsDelegateInvocationHandler = new DelegateInvocationHandler();
    memberContextDelegateInvocationHandler = new DelegateInvocationHandler();
    clusterMemberAvailabilityDelegateInvocationHandler = new DelegateInvocationHandler();

    clusterEvents =
        (ClusterMemberEvents)
            Proxy.newProxyInstance(
                ClusterMemberEvents.class.getClassLoader(),
                new Class[] {ClusterMemberEvents.class, Lifecycle.class},
                clusterEventsDelegateInvocationHandler);
    memberContext =
        (HighAvailabilityMemberContext)
            Proxy.newProxyInstance(
                HighAvailabilityMemberContext.class.getClassLoader(),
                new Class[] {HighAvailabilityMemberContext.class},
                memberContextDelegateInvocationHandler);
    clusterMemberAvailability =
        (ClusterMemberAvailability)
            Proxy.newProxyInstance(
                ClusterMemberAvailability.class.getClassLoader(),
                new Class[] {ClusterMemberAvailability.class},
                clusterMemberAvailabilityDelegateInvocationHandler);

    /*
     *  We need to create these anyway since even in compatibility mode we'll use them for switchover. If it turns
     *  out we are not going to need zookeeper, just assign them to the class fields. The difference is in when
     *  they start().
     */
    ElectionCredentialsProvider electionCredentialsProvider =
        config.get(HaSettings.slave_only)
            ? new NotElectableElectionCredentialsProvider()
            : new DefaultElectionCredentialsProvider(
                config.get(ClusterSettings.server_id),
                new OnDiskLastTxIdGetter(new File(getStoreDir())),
                new HighAvailabilityMemberInfoProvider() {
                  @Override
                  public HighAvailabilityMemberState getHighAvailabilityMemberState() {
                    return memberStateMachine.getCurrentState();
                  }
                });

    clusterClient =
        new ClusterClient(ClusterClient.adapt(config), logging, electionCredentialsProvider);
    PaxosClusterMemberEvents localClusterEvents =
        new PaxosClusterMemberEvents(
            clusterClient,
            clusterClient,
            clusterClient,
            clusterClient,
            logging,
            new Predicate<PaxosClusterMemberEvents.ClusterMembersSnapshot>() {
              @Override
              public boolean accept(PaxosClusterMemberEvents.ClusterMembersSnapshot item) {
                for (MemberIsAvailable member : item.getCurrentAvailableMembers()) {
                  if (member.getRoleUri().getScheme().equals("ha")) {
                    if (HighAvailabilityModeSwitcher.getServerId(member.getRoleUri())
                        == config.get(ClusterSettings.server_id)) {
                      msgLog.error(
                          String.format(
                              "Instance %s has the same serverId as ours (%d) - will not join this cluster",
                              member.getRoleUri(), config.get(ClusterSettings.server_id)));
                      return true;
                    }
                  }
                }
                return true;
              }
            });

    // Force a reelection after we enter the cluster
    // and when that election is finished refresh the snapshot
    clusterClient.addClusterListener(
        new ClusterListener.Adapter() {
          @Override
          public void enteredCluster(ClusterConfiguration clusterConfiguration) {
            clusterClient.performRoleElections();
          }

          @Override
          public void elected(String role, InstanceId instanceId, URI electedMember) {
            if (role.equals(ClusterConfiguration.COORDINATOR)) {
              clusterClient.refreshSnapshot();
              clusterClient.removeClusterListener(this);
            }
          }
        });

    HighAvailabilityMemberContext localMemberContext =
        new SimpleHighAvailabilityMemberContext(clusterClient.getServerId());
    PaxosClusterMemberAvailability localClusterMemberAvailability =
        new PaxosClusterMemberAvailability(
            clusterClient.getServerId(), clusterClient, clusterClient, logging);

    // Here we decide whether to start in compatibility mode or not
    if (!config.get(HaSettings.coordinators).isEmpty()
        && !config.get(HaSettings.coordinators).get(0).toString().trim().equals("")) {
      compatibilityMode = true;
      compatibilityLifecycle = new LinkedList<Lifecycle>();

      Switchover switchover =
          new ZooToPaxosSwitchover(
              life,
              paxosLife,
              compatibilityLifecycle,
              clusterEventsDelegateInvocationHandler,
              memberContextDelegateInvocationHandler,
              clusterMemberAvailabilityDelegateInvocationHandler,
              localClusterEvents,
              localMemberContext,
              localClusterMemberAvailability);

      ZooKeeperHighAvailabilityEvents zkEvents =
          new ZooKeeperHighAvailabilityEvents(logging, config, switchover);
      compatibilityLifecycle.add(zkEvents);
      memberContextDelegateInvocationHandler.setDelegate(
          new SimpleHighAvailabilityMemberContext(zkEvents.getInstanceId()));
      clusterEventsDelegateInvocationHandler.setDelegate(zkEvents);
      clusterMemberAvailabilityDelegateInvocationHandler.setDelegate(zkEvents);
      // Paxos Events added to life, won't be stopped because it isn't started yet
      paxosLife.add(localClusterEvents);
    } else {
      memberContextDelegateInvocationHandler.setDelegate(localMemberContext);
      clusterEventsDelegateInvocationHandler.setDelegate(localClusterEvents);
      clusterMemberAvailabilityDelegateInvocationHandler.setDelegate(
          localClusterMemberAvailability);
    }

    members =
        new ClusterMembers(
            clusterClient,
            clusterClient,
            clusterEvents,
            new InstanceId(config.get(ClusterSettings.server_id)));
    memberStateMachine =
        new HighAvailabilityMemberStateMachine(
            memberContext,
            accessGuard,
            members,
            clusterEvents,
            clusterClient,
            logging.getLogger(HighAvailabilityMemberStateMachine.class));

    if (compatibilityMode) {
      /*
       * In here goes stuff that needs to stop when switching. If added in paxosLife too they will be restarted.
       * Adding to life starts them when life.start is called - adding them to compatibilityLifeCycle shuts them
       * down on switchover
       */
      compatibilityLifecycle.add(memberStateMachine);
      //            compatibilityLifecycle.add( highAvailabilityModeSwitcher );
      compatibilityLifecycle.add((Lifecycle) clusterEvents);
      life.add(memberStateMachine);
      //            life.add( highAvailabilityModeSwitcher );
      life.add(clusterEvents);
    }
    /*
     * Here goes stuff that needs to start when paxos kicks in:
     * In Normal (non compatibility mode): That means they start normally
     * In Compatibility Mode: That means they start when switchover happens. If added to life too they will be
     * restarted
     */
    paxosLife.add(memberStateMachine);
    paxosLife.add(clusterEvents);
    paxosLife.add(clusterClient);
    paxosLife.add(localClusterMemberAvailability);

    DelegateInvocationHandler<TxHook> txHookDelegate = new DelegateInvocationHandler<TxHook>();
    TxHook txHook =
        (TxHook)
            Proxy.newProxyInstance(
                TxHook.class.getClassLoader(), new Class[] {TxHook.class}, txHookDelegate);
    new TxHookModeSwitcher(
        memberStateMachine,
        txHookDelegate,
        master,
        new TxHookModeSwitcher.RequestContextFactoryResolver() {
          @Override
          public RequestContextFactory get() {
            return requestContextFactory;
          }
        },
        dependencyResolver);
    return txHook;
  }
Code example #16
  BatchInserterImpl(
      String storeDir,
      FileSystemAbstraction fileSystem,
      Map<String, String> stringParams,
      Iterable<KernelExtensionFactory<?>> kernelExtensions) {
    rejectAutoUpgrade(stringParams);
    Map<String, String> params = getDefaultParams();
    params.putAll(stringParams);
    config =
        StoreFactory.configForStoreDir(
            new Config(params, GraphDatabaseSettings.class), new File(storeDir));

    life = new LifeSupport();
    this.fileSystem = fileSystem;
    this.storeDir = new File(FileUtils.fixSeparatorsInPath(storeDir));
    Neo4jJobScheduler jobScheduler = life.add(new Neo4jJobScheduler());
    LifecycledPageCache pageCache =
        life.add(new LifecycledPageCache(fileSystem, jobScheduler, config));

    msgLog = StringLogger.loggerDirectory(fileSystem, this.storeDir);
    logging = new SingleLoggingService(msgLog);
    storeLocker = new StoreLocker(fileSystem);
    storeLocker.checkLock(this.storeDir);

    boolean dump = config.get(GraphDatabaseSettings.dump_configuration);
    this.idGeneratorFactory = new DefaultIdGeneratorFactory();

    Monitors monitors = new Monitors();
    StoreFactory sf =
        new StoreFactory(config, idGeneratorFactory, pageCache, fileSystem, msgLog, monitors);

    if (dump) {
      dumpConfiguration(params);
    }
    msgLog.logMessage(Thread.currentThread() + " Starting BatchInserter(" + this + ")");
    neoStore = sf.newNeoStore(true);
    if (!neoStore.isStoreOk()) {
      throw new IllegalStateException(storeDir + " store is not cleanly shutdown.");
    }
    neoStore.makeStoreOk();
    Token[] indexes = getPropertyKeyTokenStore().getTokens(10000);
    propertyKeyTokens = new BatchTokenHolder(indexes);
    labelTokens = new BatchTokenHolder(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    Token[] types = getRelationshipTypeStore().getTokens(Integer.MAX_VALUE);
    relationshipTypeTokens = new BatchTokenHolder(types);
    indexStore = life.add(new IndexConfigStore(this.storeDir, fileSystem));
    schemaCache = new SchemaCache(neoStore.getSchemaStore());

    KernelExtensions extensions =
        life.add(
            new KernelExtensions(
                kernelExtensions,
                config,
                new DependencyResolverImpl(),
                UnsatisfiedDependencyStrategies.ignore()));

    life.start();

    SchemaIndexProvider provider =
        extensions.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    schemaIndexProviders = new DefaultSchemaIndexProviderMap(provider);
    labelScanStore =
        life.add(
            extensions
                .resolveDependency(
                    LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                .getLabelScanStore());
    actions = new BatchSchemaActions();

    // Record access
    recordAccess = new DirectRecordAccessSet(neoStore);
    relationshipCreator =
        new RelationshipCreator(
            RelationshipLocker.NO_LOCKING,
            new RelationshipGroupGetter(neoStore.getRelationshipGroupStore()),
            neoStore.getDenseNodeThreshold());
    propertyTraverser = new PropertyTraverser();
    propertyCreator = new PropertyCreator(getPropertyStore(), propertyTraverser);
    propertyDeletor = new PropertyDeleter(getPropertyStore(), propertyTraverser);
  }
Code example #17
  private void writeTransactionsToActiveLogFile(File storeDir, Response<?> response)
      throws IOException {
    LifeSupport life = new LifeSupport();
    try {
      // Start the log and appender
      PhysicalLogFiles logFiles = new PhysicalLogFiles(storeDir, fs);
      TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(10, 100);
      ReadOnlyLogVersionRepository logVersionRepository =
          new ReadOnlyLogVersionRepository(fs, storeDir);
      LogFile logFile =
          life.add(
              new PhysicalLogFile(
                  fs,
                  logFiles,
                  Long.MAX_VALUE /*don't rotate*/,
                  NO_PRUNING,
                  new ReadOnlyTransactionIdStore(fs, storeDir),
                  logVersionRepository,
                  new Monitors().newMonitor(PhysicalLogFile.Monitor.class),
                  LogRotationControl.NO_ROTATION_CONTROL,
                  transactionMetadataCache,
                  new NoRecoveryAssertingVisitor()));
      life.start();

      // Just write all transactions to the active log version. Remember that this is after a
      // store copy where there are no logs, and the transaction stream we're about to write will
      // probably contain transactions that go some time back, before the last committed
      // transaction id. So we cannot use a TransactionAppender, since it has checks for which
      // transactions one can append.
      WritableLogChannel channel = logFile.getWriter();
      final TransactionLogWriter writer =
          new TransactionLogWriter(new LogEntryWriterv1(channel, new CommandWriter(channel)));
      final AtomicLong firstTxId = new AtomicLong(-1);
      response.accept(
          new Visitor<CommittedTransactionRepresentation, IOException>() {
            @Override
            public boolean visit(CommittedTransactionRepresentation transaction)
                throws IOException {
              long txId = transaction.getCommitEntry().getTxId();
              writer.append(transaction.getTransactionRepresentation(), txId);
              firstTxId.compareAndSet(-1, txId);
              return true;
            }
          });

      // And since we write this manually we need to set the correct transaction id in the
      // header of the log that we just wrote.
      writeLogHeader(
          fs,
          logFiles.getLogFileForVersion(logVersionRepository.getCurrentLogVersion()),
          logVersionRepository.getCurrentLogVersion(),
          firstTxId.get() != -1 ? firstTxId.get() - 1 : 0);

      if (firstTxId.get() == -1) {
        console.warn(
            "Important: There are no available transaction logs on the target database, which "
                + "means the backup could not save a point-in-time reference. This means you cannot use this "
                + "backup for incremental backups, and it means you cannot use it directly to seed an HA "
                + "cluster. The next time you perform a backup, a full backup will be done. If you wish to "
                + "use this backup as a seed for a cluster, you need to start a stand-alone database on "
                + "it, and commit one write transaction, to create the transaction log needed to seed the "
                + "cluster. To avoid this happening, make sure you never manually delete transaction log "
                + "files ("
                + PhysicalLogFile.DEFAULT_NAME
                + PhysicalLogFile.DEFAULT_VERSION_SUFFIX
                + "XXX), "
                + "and that you configure the database to keep at least a few days worth of transaction logs.");
      }
    } finally {
      life.shutdown();
    }
  }
Code example #18
  @Override
  public void start() throws IOException {
    life = new LifeSupport();

    readOnly = config.get(Configuration.read_only);

    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    storeFactory.ensureStoreExists();

    final TransactionFactory tf;
    if (providers.shouldInterceptCommitting()) {
      tf = new InterceptingTransactionFactory();
    } else {
      tf = new TransactionFactory();
    }
    neoStore = storeFactory.newNeoStore(store);

    neoStoreTransactionContextSupplier = new NeoStoreTransactionContextSupplier(neoStore);

    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    final NodeManager nodeManager = dependencyResolver.resolveDependency(NodeManager.class);
    Iterator<? extends Cache<?>> caches = nodeManager.caches().iterator();
    persistenceCache =
        new PersistenceCache(
            (AutoLoadingCache<NodeImpl>) caches.next(),
            (AutoLoadingCache<RelationshipImpl>) caches.next(),
            new Thunk<GraphPropertiesImpl>() {
              @Override
              public GraphPropertiesImpl evaluate() {
                return nodeManager.getGraphProperties();
              }
            },
            nodeManager);
    cacheAccess =
        new BridgingCacheAccess(nodeManager, schemaCache, updateableSchemaState, persistenceCache);

    try {
      indexProvider =
          dependencyResolver.resolveDependency(
              SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);

      // TODO: Build a real provider map
      DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap(indexProvider);

      indexingService =
          life.add(
              new IndexingService(
                  scheduler,
                  providerMap,
                  new NeoStoreIndexStoreView(locks, neoStore),
                  tokenNameLookup,
                  updateableSchemaState,
                  logging));

      integrityValidator = new IntegrityValidator(neoStore, indexingService);

      xaContainer =
          xaFactory.newXaContainer(
              this,
              config.get(Configuration.logical_log),
              new CommandFactory(neoStore, indexingService),
              new NeoStoreInjectedTransactionValidator(integrityValidator),
              tf,
              stateFactory,
              providers,
              readOnly);

      labelScanStore =
          life.add(
              dependencyResolver
                  .resolveDependency(
                      LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                  .getLabelScanStore());

      fileListing = new NeoStoreFileListing(xaContainer, storeDir, labelScanStore, indexingService);

      kernel =
          life.add(
              new Kernel(
                  txManager,
                  propertyKeyTokens,
                  labelTokens,
                  relationshipTypeTokens,
                  persistenceManager,
                  lockManager,
                  updateableSchemaState,
                  schemaWriteGuard,
                  indexingService,
                  nodeManager,
                  neoStore,
                  persistenceCache,
                  schemaCache,
                  providerMap,
                  labelScanStore,
                  readOnly));

      life.init();

      // TODO: Why isn't this done in the init() method of the indexing service?
      if (!readOnly) {
        neoStore.setRecoveredStatus(true);
        try {
          indexingService.initIndexes(loadIndexRules());
          xaContainer.openLogicalLog();
        } finally {
          neoStore.setRecoveredStatus(false);
        }
      }
      if (!xaContainer.getResourceManager().hasRecoveredTransactions()) {
        neoStore.makeStoreOk();
      } else {
        msgLog.debug("Waiting for TM to take care of recovered " + "transactions.");
      }
      idGenerators = new ArrayMap<>((byte) 5, false, false);
      this.idGenerators.put(Node.class, neoStore.getNodeStore());
      this.idGenerators.put(Relationship.class, neoStore.getRelationshipStore());
      this.idGenerators.put(RelationshipType.class, neoStore.getRelationshipTypeStore());
      this.idGenerators.put(Label.class, neoStore.getLabelTokenStore());
      this.idGenerators.put(PropertyStore.class, neoStore.getPropertyStore());
      this.idGenerators.put(
          PropertyKeyTokenRecord.class, neoStore.getPropertyStore().getPropertyKeyTokenStore());
      setLogicalLogAtCreationTime(xaContainer.getLogicalLog());

      life.start();
    } catch (Throwable e) { // Something unexpected happened during startup
      try { // Close the neostore, so that locks are released properly
        neoStore.close();
      } catch (Exception closeException) {
        msgLog.logMessage("Couldn't close neostore after startup failure");
      }
      throw Exceptions.launderedException(e);
    }
  }
Code example #19
  @Override
  public void start() throws IOException {
    life = new LifeSupport();
    readOnly = config.get(Configuration.read_only);
    storeDir = config.get(Configuration.store_dir);
    File store = config.get(Configuration.neo_store);
    if (!storeFactory.storeExists()) {
      storeFactory.createNeoStore().close();
    }
    indexProvider =
        dependencyResolver.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    storeMigrationProcess.addParticipant(indexProvider.storeMigrationParticipant());
    // TODO: Build a real provider map
    final DefaultSchemaIndexProviderMap providerMap =
        new DefaultSchemaIndexProviderMap(indexProvider);
    storeMigrationProcess.migrateIfNeeded(store.getParentFile());
    neoStore = dependencies.satisfyDependency(storeFactory.newNeoStore(false));
    dependencies.satisfyDependency(TransactionIdStore.class, neoStore);

    schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    nodeCache = new AutoLoadingCache<>(cacheProvider.node(), nodeLoader(neoStore.getNodeStore()));
    relationshipCache =
        new AutoLoadingCache<>(
            cacheProvider.relationship(), relationshipLoader(neoStore.getRelationshipStore()));
    RelationshipLoader relationshipLoader =
        new RelationshipLoader(relationshipCache, new RelationshipChainLoader(neoStore));
    PersistenceCache persistenceCache =
        new PersistenceCache(
            nodeCache,
            relationshipCache,
            nodeManager,
            relationshipLoader,
            propertyKeyTokenHolder,
            relationshipTypeTokens,
            labelTokens);
    CacheAccessBackDoor cacheAccess =
        new BridgingCacheAccess(schemaCache, updateableSchemaState, persistenceCache);
    try {
      indexingService =
          new IndexingService(
              scheduler,
              providerMap,
              new NeoStoreIndexStoreView(lockService, neoStore),
              tokenNameLookup,
              updateableSchemaState,
              indexRuleLoader(),
              logging,
              indexingServiceMonitor); // TODO 2.2-future What index rules should be
      final IntegrityValidator integrityValidator =
          new IntegrityValidator(neoStore, indexingService);
      labelScanStore =
          dependencyResolver
              .resolveDependency(
                  LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
              .getLabelScanStore();
      fileListing =
          new NeoStoreFileListing(
              storeDir, labelScanStore, indexingService, legacyIndexProviderLookup);
      Provider<NeoStore> neoStoreProvider =
          new Provider<NeoStore>() {
            @Override
            public NeoStore instance() {
              return getNeoStore();
            }
          };

      if (config.get(GraphDatabaseSettings.cache_type).equals(CacheLayer.EXPERIMENTAL_OFF)) {
        storeLayer =
            new DiskLayer(
                propertyKeyTokenHolder,
                labelTokens,
                relationshipTypeTokens,
                new SchemaStorage(neoStore.getSchemaStore()),
                neoStoreProvider,
                indexingService);
      } else {
        storeLayer =
            new CacheLayer(
                new DiskLayer(
                    propertyKeyTokenHolder,
                    labelTokens,
                    relationshipTypeTokens,
                    new SchemaStorage(neoStore.getSchemaStore()),
                    neoStoreProvider,
                    indexingService),
                persistenceCache,
                indexingService,
                schemaCache);
      }

      LegacyPropertyTrackers legacyPropertyTrackers =
          new LegacyPropertyTrackers(
              propertyKeyTokenHolder,
              nodeManager.getNodePropertyTrackers(),
              nodeManager.getRelationshipPropertyTrackers(),
              nodeManager);
      StatisticsService statisticsService =
          new StatisticsServiceRepository(fs, config, storeLayer, scheduler).loadStatistics();
      final NeoStoreTransactionContextSupplier neoStoreTransactionContextSupplier =
          new NeoStoreTransactionContextSupplier(neoStore);

      final TransactionHooks hooks = new TransactionHooks();
      File directory = config.get(GraphDatabaseSettings.store_dir);
      TransactionMetadataCache transactionMetadataCache =
          new TransactionMetadataCache(1000, 100_000);
      PhysicalLogFiles logFiles = new PhysicalLogFiles(directory, PhysicalLogFile.DEFAULT_NAME, fs);

      LogFileInformation logFileInformation =
          dependencies.satisfyDependency(
              LogFileInformation.class,
              new PhysicalLogFileInformation(
                  logFiles,
                  transactionMetadataCache,
                  neoStore,
                  new PhysicalLogFileInformation.SPI() {
                    @Override
                    public long getTimestampForVersion(long version) throws IOException {
                      LogPosition position = new LogPosition(version, LOG_HEADER_SIZE);
                      try (ReadableLogChannel channel = logFile.getReader(position)) {
                        LogEntryReader<ReadableLogChannel> reader =
                            new VersionAwareLogEntryReader();
                        LogEntry entry;
                        while ((entry = reader.readLogEntry(channel)) != null) {
                          if (entry instanceof LogEntryStart) {
                            return ((LogEntryStart) entry).getTimeWritten();
                          }
                        }
                      }
                      return -1;
                    }
                  }));

      LogPruneStrategy logPruneStrategy =
          LogPruneStrategyFactory.fromConfigValue(
              fs,
              logFileInformation,
              logFiles,
              neoStore,
              config.get(GraphDatabaseSettings.keep_logical_logs));

      final TransactionRepresentationStoreApplier storeApplier =
          dependencies.satisfyDependency(
              new TransactionRepresentationStoreApplier(
                  indexingService,
                  labelScanStore,
                  neoStore,
                  cacheAccess,
                  lockService,
                  legacyIndexProviderLookup,
                  indexConfigStore,
                  DEFAULT_HIGH_ID_TRACKING));

      LoggingLogFileMonitor logMonitor =
          new LoggingLogFileMonitor(logging.getMessagesLog(getClass()));
      RecoveryVisitor recoveryVisitor =
          new RecoveryVisitor(neoStore, storeApplier, recoveredCount, logMonitor);
      Visitor<ReadableLogChannel, IOException> logFileRecoverer =
          new LogFileRecoverer(new VersionAwareLogEntryReader(), recoveryVisitor);
      logFile =
          dependencies.satisfyDependency(
              new PhysicalLogFile(
                  fs,
                  logFiles,
                  config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
                  logPruneStrategy,
                  neoStore,
                  neoStore,
                  logMonitor,
                  this,
                  transactionMetadataCache,
                  logFileRecoverer));

      final LogicalTransactionStore logicalTransactionStore =
          dependencies.satisfyDependency(
              LogicalTransactionStore.class,
              new PhysicalLogicalTransactionStore(
                  logFile,
                  txIdGenerator,
                  transactionMetadataCache,
                  neoStore,
                  config.get(GraphDatabaseSettings.batched_writes)));

      TransactionCommitProcess transactionCommitProcess =
          dependencies.satisfyDependency(
              TransactionCommitProcess.class,
              commitProcessFactory.create(
                  logicalTransactionStore,
                  kernelHealth,
                  neoStore,
                  storeApplier,
                  new NeoStoreInjectedTransactionValidator(integrityValidator),
                  false));

      /*
       * This is used by legacy indexes and constraint indexes whenever a transaction is to be spawned
       * from within an existing transaction. It smells, and we should look over alternatives when time permits.
       */
      Provider<KernelAPI> kernelProvider =
          new Provider<KernelAPI>() {
            @Override
            public KernelAPI instance() {
              return kernel;
            }
          };

      ConstraintIndexCreator constraintIndexCreator =
          new ConstraintIndexCreator(kernelProvider, indexingService);

      LegacyIndexStore legacyIndexStore =
          new LegacyIndexStore(config, indexConfigStore, kernelProvider, legacyIndexProviderLookup);

      StatementOperationParts statementOperations =
          buildStatementOperations(
              storeLayer,
              legacyPropertyTrackers,
              constraintIndexCreator,
              updateableSchemaState,
              guard,
              legacyIndexStore);

      kernelTransactions =
          life.add(
              new KernelTransactions(
                  neoStoreTransactionContextSupplier,
                  neoStore,
                  locks,
                  integrityValidator,
                  constraintIndexCreator,
                  indexingService,
                  labelScanStore,
                  statementOperations,
                  updateableSchemaState,
                  schemaWriteGuard,
                  providerMap,
                  transactionHeaderInformationFactory,
                  persistenceCache,
                  storeLayer,
                  transactionCommitProcess,
                  indexConfigStore,
                  legacyIndexProviderLookup,
                  hooks,
                  transactionMonitor,
                  life,
                  readOnly));

      kernel =
          new Kernel(
              statisticsService, kernelTransactions, hooks, kernelHealth, transactionMonitor);

      life.add(logFile);
      life.add(logicalTransactionStore);
      life.add(
          new LifecycleAdapter() {
            @Override
            public void start() throws Throwable {
              startupStatistics.setNumberOfRecoveredTransactions(recoveredCount.get());
              recoveredCount.set(0);
              loadSchemaCache();
            }
          });
      life.add(statisticsService);
      life.add(
          new LifecycleAdapter() {
            @Override
            public void start() {
              neoStore.makeStoreOk();
            }
          });
      life.add(indexingService);
      life.add(labelScanStore);

      kernel.registerTransactionHook(transactionEventHandlers);
      neoStore.setRecoveredStatus(true);
      try {
        life.start();
      } finally {
        neoStore.setRecoveredStatus(false);
      }

      propertyKeyTokenHolder.addTokens(
          neoStore.getPropertyKeyTokenStore().getTokens(Integer.MAX_VALUE));
      relationshipTypeTokens.addTokens(
          neoStore.getRelationshipTypeTokenStore().getTokens(Integer.MAX_VALUE));
      labelTokens.addTokens(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    } catch (Throwable e) { // Something unexpected happened during startup
      try { // Close the neostore, so that locks are released properly
        neoStore.close();
      } catch (Exception closeException) {
        msgLog.logMessage("Couldn't close neostore after startup failure");
      }
      throw Exceptions.launderedException(e);
    }
  }
Code example #20
File: NeoStoreDataSource.java  Project: 0xNacho/neo4j
  private CacheModule buildCaches(
      final NeoStore neoStore,
      Caches cacheProvider,
      NodeManager nodeManager,
      LabelTokenHolder labelTokens,
      RelationshipTypeTokenHolder relationshipTypeTokens,
      PropertyKeyTokenHolder propertyKeyTokenHolder) {
    final UpdateableSchemaState updateableSchemaState =
        new KernelSchemaStateStore(logging.getMessagesLog(KernelSchemaStateStore.class));

    final SchemaCache schemaCache = new SchemaCache(Collections.<SchemaRule>emptyList());

    final AutoLoadingCache<NodeImpl> nodeCache =
        new AutoLoadingCache<>(cacheProvider.node(), nodeLoader(neoStore.getNodeStore()));
    final AutoLoadingCache<RelationshipImpl> relationshipCache =
        new AutoLoadingCache<>(
            cacheProvider.relationship(), relationshipLoader(neoStore.getRelationshipStore()));
    RelationshipLoader relationshipLoader =
        new RelationshipLoader(
            lockService, relationshipCache, new RelationshipChainLoader(neoStore));
    final PersistenceCache persistenceCache =
        new PersistenceCache(
            nodeCache,
            relationshipCache,
            nodeManager,
            relationshipLoader,
            propertyKeyTokenHolder,
            relationshipTypeTokens,
            labelTokens);
    final CacheAccessBackDoor cacheAccess =
        new BridgingCacheAccess(schemaCache, updateableSchemaState, persistenceCache);

    life.add(
        new LifecycleAdapter() {
          @Override
          public void start() throws Throwable {
            loadSchemaCache();
          }

          @Override
          public void stop() throws Throwable {}
        });

    return new CacheModule() {
      @Override
      public SchemaCache schemaCache() {
        return schemaCache;
      }

      @Override
      public UpdateableSchemaState updateableSchemaState() {
        return updateableSchemaState;
      }

      @Override
      public PersistenceCache persistenceCache() {
        return persistenceCache;
      }

      @Override
      public CacheAccessBackDoor cacheAccess() {
        return cacheAccess;
      }

      @Override
      public Cache<NodeImpl> nodeCache() {
        return nodeCache;
      }

      @Override
      public Cache<RelationshipImpl> relationshipCache() {
        return relationshipCache;
      }
    };
  }
Code example #21
 private StandaloneClusterClient(Logging logging, ClusterClient clusterClient) {
   life.add(logging);
   life.add(clusterClient);
   addShutdownHook();
   life.start();
 }
Code example #22
  @Override
  protected RemoteTxHook createTxHook() {
    clusterEventsDelegateInvocationHandler = new DelegateInvocationHandler();
    memberContextDelegateInvocationHandler = new DelegateInvocationHandler();
    clusterMemberAvailabilityDelegateInvocationHandler = new DelegateInvocationHandler();

    clusterEvents =
        (ClusterMemberEvents)
            Proxy.newProxyInstance(
                ClusterMemberEvents.class.getClassLoader(),
                new Class[] {ClusterMemberEvents.class, Lifecycle.class},
                clusterEventsDelegateInvocationHandler);
    memberContext =
        (HighAvailabilityMemberContext)
            Proxy.newProxyInstance(
                HighAvailabilityMemberContext.class.getClassLoader(),
                new Class[] {HighAvailabilityMemberContext.class},
                memberContextDelegateInvocationHandler);
    clusterMemberAvailability =
        (ClusterMemberAvailability)
            Proxy.newProxyInstance(
                ClusterMemberAvailability.class.getClassLoader(),
                new Class[] {ClusterMemberAvailability.class},
                clusterMemberAvailabilityDelegateInvocationHandler);

    ElectionCredentialsProvider electionCredentialsProvider =
        config.get(HaSettings.slave_only)
            ? new NotElectableElectionCredentialsProvider()
            : new DefaultElectionCredentialsProvider(
                config.get(ClusterSettings.server_id),
                new OnDiskLastTxIdGetter(new File(getStoreDir())),
                new HighAvailabilityMemberInfoProvider() {
                  @Override
                  public HighAvailabilityMemberState getHighAvailabilityMemberState() {
                    return memberStateMachine.getCurrentState();
                  }
                });

    ObjectStreamFactory objectStreamFactory = new ObjectStreamFactory();

    clusterClient =
        new ClusterClient(
            ClusterClient.adapt(config),
            logging,
            electionCredentialsProvider,
            objectStreamFactory,
            objectStreamFactory);
    PaxosClusterMemberEvents localClusterEvents =
        new PaxosClusterMemberEvents(
            clusterClient,
            clusterClient,
            clusterClient,
            clusterClient,
            logging,
            new Predicate<PaxosClusterMemberEvents.ClusterMembersSnapshot>() {
              @Override
              public boolean accept(PaxosClusterMemberEvents.ClusterMembersSnapshot item) {
                for (MemberIsAvailable member : item.getCurrentAvailableMembers()) {
                  if (member.getRoleUri().getScheme().equals("ha")) {
                    if (HighAvailabilityModeSwitcher.getServerId(member.getRoleUri())
                        == config.get(ClusterSettings.server_id)) {
                      msgLog.error(
                          String.format(
                              "Instance %s has the same serverId as ours (%d) - will not "
                                  + "join this cluster",
                              member.getRoleUri(), config.get(ClusterSettings.server_id)));
                      return true;
                    }
                  }
                }
                return true;
              }
            },
            new HANewSnapshotFunction(),
            objectStreamFactory,
            objectStreamFactory);

    // Force a reelection after we enter the cluster
    // and when that election is finished refresh the snapshot
    clusterClient.addClusterListener(
        new ClusterListener.Adapter() {
          // This ensures that the election result is (at least) from our request or thereafter
          boolean hasRequestedElection = true;

          @Override
          public void enteredCluster(ClusterConfiguration clusterConfiguration) {
            clusterClient.performRoleElections();
          }

          @Override
          public void elected(String role, InstanceId instanceId, URI electedMember) {
            if (hasRequestedElection && role.equals(ClusterConfiguration.COORDINATOR)) {
              clusterClient.removeClusterListener(this);
            }
          }
        });

    HighAvailabilityMemberContext localMemberContext =
        new SimpleHighAvailabilityMemberContext(clusterClient.getServerId());
    PaxosClusterMemberAvailability localClusterMemberAvailability =
        new PaxosClusterMemberAvailability(
            clusterClient.getServerId(),
            clusterClient,
            clusterClient,
            logging,
            objectStreamFactory,
            objectStreamFactory);

    memberContextDelegateInvocationHandler.setDelegate(localMemberContext);
    clusterEventsDelegateInvocationHandler.setDelegate(localClusterEvents);
    clusterMemberAvailabilityDelegateInvocationHandler.setDelegate(localClusterMemberAvailability);

    members =
        new ClusterMembers(
            clusterClient,
            clusterClient,
            clusterEvents,
            new InstanceId(config.get(ClusterSettings.server_id)));
    memberStateMachine =
        new HighAvailabilityMemberStateMachine(
            memberContext,
            availabilityGuard,
            members,
            clusterEvents,
            clusterClient,
            logging.getMessagesLog(HighAvailabilityMemberStateMachine.class));

    HighAvailabilityConsoleLogger highAvailabilityConsoleLogger =
        new HighAvailabilityConsoleLogger(
            logging.getConsoleLog(HighAvailabilityConsoleLogger.class),
            new InstanceId(config.get(ClusterSettings.server_id)));
    availabilityGuard.addListener(highAvailabilityConsoleLogger);
    clusterEvents.addClusterMemberListener(highAvailabilityConsoleLogger);
    clusterClient.addClusterListener(highAvailabilityConsoleLogger);

    paxosLife.add(clusterClient);
    paxosLife.add(memberStateMachine);
    paxosLife.add(clusterEvents);
    paxosLife.add(localClusterMemberAvailability);

    DelegateInvocationHandler<RemoteTxHook> txHookDelegate = new DelegateInvocationHandler<>();
    RemoteTxHook txHook =
        (RemoteTxHook)
            Proxy.newProxyInstance(
                RemoteTxHook.class.getClassLoader(),
                new Class[] {RemoteTxHook.class},
                txHookDelegate);
    new TxHookModeSwitcher(
        memberStateMachine,
        txHookDelegate,
        master,
        new TxHookModeSwitcher.RequestContextFactoryResolver() {
          @Override
          public RequestContextFactory get() {
            return requestContextFactory;
          }
        },
        logging.getMessagesLog(TxHookModeSwitcher.class),
        dependencyResolver);
    return txHook;
  }
Code example #23
File: BatchInserterImpl.java  Project: MPiunti/WhlOLD
  BatchInserterImpl(
      String storeDir,
      FileSystemAbstraction fileSystem,
      Map<String, String> stringParams,
      Iterable<KernelExtensionFactory<?>> kernelExtensions) {
    life = new LifeSupport();
    this.fileSystem = fileSystem;
    this.storeDir = new File(FileUtils.fixSeparatorsInPath(storeDir));

    rejectAutoUpgrade(stringParams);
    msgLog = StringLogger.loggerDirectory(fileSystem, this.storeDir);
    logging = new SingleLoggingService(msgLog);
    Map<String, String> params = getDefaultParams();
    params.put(GraphDatabaseSettings.use_memory_mapped_buffers.name(), Settings.FALSE);
    params.put(InternalAbstractGraphDatabase.Configuration.store_dir.name(), storeDir);
    params.putAll(stringParams);

    storeLocker = new StoreLocker(fileSystem);
    storeLocker.checkLock(this.storeDir);

    config = new Config(params, GraphDatabaseSettings.class);
    boolean dump = config.get(GraphDatabaseSettings.dump_configuration);
    this.idGeneratorFactory = new DefaultIdGeneratorFactory();

    StoreFactory sf =
        new StoreFactory(
            config, idGeneratorFactory, new DefaultWindowPoolFactory(), fileSystem, msgLog, null);

    File store = fixPath(this.storeDir, sf);

    if (dump) {
      dumpConfiguration(params);
    }
    msgLog.logMessage(Thread.currentThread() + " Starting BatchInserter(" + this + ")");
    neoStore = sf.newNeoStore(store);
    if (!neoStore.isStoreOk()) {
      throw new IllegalStateException(storeDir + " store is not cleanly shutdown.");
    }
    neoStore.makeStoreOk();
    Token[] indexes = getPropertyKeyTokenStore().getTokens(10000);
    propertyKeyTokens = new BatchTokenHolder(indexes);
    labelTokens = new BatchTokenHolder(neoStore.getLabelTokenStore().getTokens(Integer.MAX_VALUE));
    Token[] types = getRelationshipTypeStore().getTokens(Integer.MAX_VALUE);
    relationshipTypeTokens = new BatchTokenHolder(types);
    indexStore = life.add(new IndexStore(this.storeDir, fileSystem));
    schemaCache = new SchemaCache(neoStore.getSchemaStore());

    KernelExtensions extensions =
        life.add(
            new KernelExtensions(
                kernelExtensions,
                config,
                new DependencyResolverImpl(),
                UnsatisfiedDependencyStrategies.ignore()));

    life.start();

    SchemaIndexProvider provider =
        extensions.resolveDependency(
            SchemaIndexProvider.class, SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE);
    schemaIndexProviders = new DefaultSchemaIndexProviderMap(provider);
    labelScanStore =
        life.add(
            extensions
                .resolveDependency(
                    LabelScanStoreProvider.class, LabelScanStoreProvider.HIGHEST_PRIORITIZED)
                .getLabelScanStore());
    actions = new BatchSchemaActions();
  }
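This constructor is normally reached through the public BatchInserters factory rather than instantiated directly. A sketch of a typical caller against the Neo4j 2.x batch-insertion API follows; the store path, property values, and BatchInsertDemo class are illustrative.

import org.neo4j.graphdb.DynamicLabel;
import org.neo4j.graphdb.DynamicRelationshipType;
import org.neo4j.helpers.collection.MapUtil;
import org.neo4j.unsafe.batchinsert.BatchInserter;
import org.neo4j.unsafe.batchinsert.BatchInserters;

public class BatchInsertDemo {
  public static void main(String[] args) {
    // The factory ends up in the constructor above: store locking, token holders, LifeSupport.
    BatchInserter inserter = BatchInserters.inserter("target/batch-store");
    try {
      long alice =
          inserter.createNode(MapUtil.map("name", "Alice"), DynamicLabel.label("Person"));
      long bob = inserter.createNode(MapUtil.map("name", "Bob"), DynamicLabel.label("Person"));
      inserter.createRelationship(alice, bob, DynamicRelationshipType.withName("KNOWS"), null);
    } finally {
      // shutdown() flushes the stores and releases the lock taken by StoreLocker above.
      inserter.shutdown();
    }
  }
}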
Code example #24
File: NeoStoreDataSource.java  Project: 0xNacho/neo4j
  private TransactionLogModule buildTransactionLogs(
      Config config,
      Logging logging,
      LabelScanStore labelScanStore,
      FileSystemAbstraction fileSystemAbstraction,
      NeoStore neoStore,
      CacheAccessBackDoor cacheAccess,
      IndexingService indexingService,
      Iterable<IndexImplementation> indexProviders) {
    File directory = config.get(GraphDatabaseSettings.store_dir);
    TransactionMetadataCache transactionMetadataCache = new TransactionMetadataCache(1000, 100_000);
    final PhysicalLogFiles logFiles =
        new PhysicalLogFiles(directory, PhysicalLogFile.DEFAULT_NAME, fileSystemAbstraction);

    IdOrderingQueue legacyIndexTransactionOrdering = new SynchronizedArrayIdOrderingQueue(20);
    final TransactionRepresentationStoreApplier storeApplier =
        dependencies.satisfyDependency(
            new TransactionRepresentationStoreApplier(
                indexingService,
                labelScanStore,
                neoStore,
                cacheAccess,
                lockService,
                legacyIndexProviderLookup,
                indexConfigStore,
                legacyIndexTransactionOrdering));

    final PhysicalLogFile logFile =
        new PhysicalLogFile(
            fileSystemAbstraction,
            logFiles,
            config.get(GraphDatabaseSettings.logical_log_rotation_threshold),
            neoStore,
            neoStore,
            physicalLogMonitor,
            transactionMetadataCache);

    final PhysicalLogFileInformation.SPI logInformation =
        new PhysicalLogFileInformation.SPI() {
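          // Scan the given log version for its first LogEntryStart and return its timestamp,
          // or -1 if the file contains no start entry.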
          @Override
          public long getTimestampForVersion(long version) throws IOException {
            LogPosition position = new LogPosition(version, LOG_HEADER_SIZE);
            try (ReadableVersionableLogChannel channel = logFile.getReader(position)) {
              final LogEntryReader<ReadableVersionableLogChannel> reader =
                  new LogEntryReaderFactory().versionable();
              LogEntry entry;
              while ((entry = reader.readLogEntry(channel)) != null) {
                if (entry instanceof LogEntryStart) {
                  return entry.<LogEntryStart>as().getTimeWritten();
                }
              }
            }
            return -1;
          }
        };
    final LogFileInformation logFileInformation =
        new PhysicalLogFileInformation(
            logFiles, transactionMetadataCache, neoStore, logInformation);

    LogPruneStrategy logPruneStrategy =
        LogPruneStrategyFactory.fromConfigValue(
            fs,
            logFileInformation,
            logFiles,
            neoStore,
            config.get(GraphDatabaseSettings.keep_logical_logs));

    monitors.addMonitorListener(new LogPruning(logPruneStrategy));

    final LogRotationControl logRotationControl =
        new LogRotationControl(neoStore, indexingService, labelScanStore, indexProviders);

    final LogRotation logRotation =
        new LogRotationImpl(
            monitors.newMonitor(LogRotation.Monitor.class), logFile, logRotationControl);

    final LogicalTransactionStore logicalTransactionStore =
        new PhysicalLogicalTransactionStore(
            logFile,
            logRotation,
            transactionMetadataCache,
            neoStore,
            legacyIndexTransactionOrdering,
            config.get(GraphDatabaseSettings.batched_writes));

    life.add(logFile);
    life.add(logicalTransactionStore);

    return new TransactionLogModule() {
      @Override
      public TransactionRepresentationStoreApplier storeApplier() {
        return storeApplier;
      }

      @Override
      public LogicalTransactionStore logicalTransactionStore() {
        return logicalTransactionStore;
      }

      @Override
      public LogFileInformation logFileInformation() {
        return logFileInformation;
      }

      @Override
      public PhysicalLogFiles logFiles() {
        return logFiles;
      }

      @Override
      public LogFile logFile() {
        return logFile;
      }

      @Override
      public LogRotationControl logRotationControl() {
        return logRotationControl;
      }

      @Override
      public LogRotation logRotation() {
        return logRotation;
      }
    };
  }
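Worth noting is the recurring LifeSupport pattern: logFile and logicalTransactionStore are registered with life and are later started in registration order and stopped in reverse. A minimal sketch of that lifecycle contract, assuming the Neo4j lifecycle classes are on the classpath; the two components and LifeSupportDemo are hypothetical.

import org.neo4j.kernel.lifecycle.LifeSupport;
import org.neo4j.kernel.lifecycle.LifecycleAdapter;

public class LifeSupportDemo {
  public static void main(String[] args) {
    LifeSupport life = new LifeSupport();

    // add() returns its argument, which is why fields can be assigned inline elsewhere.
    life.add(
        new LifecycleAdapter() {
          @Override
          public void start() {
            System.out.println("log file: start");
          }

          @Override
          public void stop() {
            System.out.println("log file: stop");
          }
        });
    life.add(
        new LifecycleAdapter() {
          @Override
          public void start() {
            System.out.println("transaction store: start");
          }

          @Override
          public void stop() {
            System.out.println("transaction store: stop");
          }
        });

    life.start(); // components start in registration order
    life.shutdown(); // and stop/shut down in reverse order
  }
}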
Code example #25
File: NeoStoreDataSource.java  Project: 0xNacho/neo4j
  private KernelModule buildKernel(
      IntegrityValidator integrityValidator,
      LogicalTransactionStore logicalTransactionStore,
      NeoStore neoStore,
      TransactionRepresentationStoreApplier storeApplier,
      IndexingService indexingService,
      StoreReadLayer storeLayer,
      UpdateableSchemaState updateableSchemaState,
      LabelScanStore labelScanStore,
      PersistenceCache persistenceCache,
      SchemaIndexProviderMap schemaIndexProviderMap) {
    final TransactionCommitProcess transactionCommitProcess =
        commitProcessFactory.create(
            logicalTransactionStore,
            kernelHealth,
            neoStore,
            storeApplier,
            new NeoStoreInjectedTransactionValidator(integrityValidator),
            TransactionApplicationMode.INTERNAL,
            config);

    /*
     * This is used by legacy indexes and constraint indexes whenever a transaction is to be spawned
     * from within an existing transaction. It smells, and we should look over alternatives when time permits.
     */
    Provider<KernelAPI> kernelProvider =
        new Provider<KernelAPI>() {
          @Override
          public KernelAPI instance() {
            return kernelModule.kernelAPI();
          }
        };

    ConstraintIndexCreator constraintIndexCreator =
        new ConstraintIndexCreator(kernelProvider, indexingService);

    LegacyIndexStore legacyIndexStore =
        new LegacyIndexStore(config, indexConfigStore, kernelProvider, legacyIndexProviderLookup);

    LegacyPropertyTrackers legacyPropertyTrackers =
        new LegacyPropertyTrackers(
            propertyKeyTokenHolder,
            nodeManager.getNodePropertyTrackers(),
            nodeManager.getRelationshipPropertyTrackers(),
            nodeManager);
    final NeoStoreTransactionContextSupplier neoStoreTransactionContextSupplier =
        new NeoStoreTransactionContextSupplier(neoStore);

    StatementOperationParts statementOperations =
        buildStatementOperations(
            storeLayer,
            legacyPropertyTrackers,
            constraintIndexCreator,
            updateableSchemaState,
            guard,
            legacyIndexStore);

    final TransactionHooks hooks = new TransactionHooks();
    final KernelTransactions kernelTransactions =
        life.add(
            new KernelTransactions(
                neoStoreTransactionContextSupplier,
                neoStore,
                locks,
                integrityValidator,
                constraintIndexCreator,
                indexingService,
                labelScanStore,
                statementOperations,
                updateableSchemaState,
                schemaWriteGuard,
                schemaIndexProviderMap,
                transactionHeaderInformationFactory,
                persistenceCache,
                storeLayer,
                transactionCommitProcess,
                indexConfigStore,
                legacyIndexProviderLookup,
                hooks,
                transactionMonitor,
                life));

    final Kernel kernel = new Kernel(kernelTransactions, hooks, kernelHealth, transactionMonitor);

    kernel.registerTransactionHook(transactionEventHandlers);

    final NeoStoreFileListing fileListing =
        new NeoStoreFileListing(
            storeDir, labelScanStore, indexingService, legacyIndexProviderLookup);

    return new KernelModule() {
      @Override
      public TransactionCommitProcess transactionCommitProcess() {
        return transactionCommitProcess;
      }

      @Override
      public KernelAPI kernelAPI() {
        return kernel;
      }

      @Override
      public KernelTransactions kernelTransactions() {
        return kernelTransactions;
      }

      @Override
      public NeoStoreFileListing fileListing() {
        return fileListing;
      }
    };
  }
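The Provider<KernelAPI> indirection above breaks a construction cycle: ConstraintIndexCreator and LegacyIndexStore need the kernel, but kernelModule is only assigned after this method returns. A minimal sketch of the same deferred-lookup trick using the standard java.util.function.Supplier; all class names here are hypothetical.

import java.util.function.Supplier;

// Needs the kernel, but only resolves it at call time, never at construction time.
class IndexCreator {
  private final Supplier<Kernel> kernel;

  IndexCreator(Supplier<Kernel> kernel) {
    this.kernel = kernel;
  }

  void createIndex() {
    kernel.get().beginWork(); // resolved lazily, after the kernel exists
  }
}

class Kernel {
  void beginWork() {
    System.out.println("kernel transaction started");
  }
}

public class LazyProviderDemo {
  private static Kernel kernel; // assigned only after IndexCreator is built

  public static void main(String[] args) {
    IndexCreator creator = new IndexCreator(() -> kernel); // safe: not dereferenced yet
    kernel = new Kernel(); // the cycle is closed later
    creator.createIndex();
  }
}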
Code example #26
File: NeoStoreDataSource.java  Project: 0xNacho/neo4j
  private void buildRecovery(
      final FileSystemAbstraction fileSystemAbstraction,
      CacheAccessBackDoor cacheAccess,
      IndexingService indexingService,
      LabelScanStore labelScanStore,
      final NeoStore neoStore,
      RecoveryVisitor.Monitor recoveryVisitorMonitor,
      Recovery.Monitor recoveryMonitor,
      final PhysicalLogFiles logFiles,
      final LogRotationControl logRotationControl,
      final StartupStatisticsProvider startupStatistics) {
    final TransactionRepresentationStoreApplier storeRecoverer =
        new TransactionRepresentationStoreApplier(
            indexingService,
            labelScanStore,
            neoStore,
            cacheAccess,
            lockService,
            legacyIndexProviderLookup,
            indexConfigStore,
            IdOrderingQueue.BYPASS);

    RecoveryVisitor recoveryVisitor =
        new RecoveryVisitor(neoStore, storeRecoverer, recoveryVisitorMonitor);

    LogEntryReader<ReadableVersionableLogChannel> logEntryReader =
        new LogEntryReaderFactory().versionable();
    final Visitor<LogVersionedStoreChannel, IOException> logFileRecoverer =
        new LogFileRecoverer(logEntryReader, recoveryVisitor);

    Recovery recovery =
        new Recovery(
            new Recovery.SPI() {
              @Override
              public void forceEverything() {
                logRotationControl.forceEverything();
              }

              @Override
              public long getCurrentLogVersion() {
                return neoStore.getCurrentLogVersion();
              }

              @Override
              public Visitor<LogVersionedStoreChannel, IOException> getRecoverer() {
                return logFileRecoverer;
              }

              @Override
              public PhysicalLogVersionedStoreChannel getLogFile(long recoveryVersion)
                  throws IOException {
                return PhysicalLogFile.openForVersion(
                    logFiles, fileSystemAbstraction, recoveryVersion);
              }
            },
            recoveryMonitor);

    life.add(recovery);

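    // One-shot startup hook: publish how many transactions recovery replayed, then reset the
    // counter.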
    life.add(
        new LifecycleAdapter() {
          @Override
          public void start() throws Throwable {
            startupStatistics.setNumberOfRecoveredTransactions(recoveredCount.get());
            recoveredCount.set(0);
          }
        });
  }
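Recovery is handed a narrow SPI rather than the concrete NeoStore, PhysicalLogFile, and LogRotationControl, which keeps the recovery logic decoupled and easy to exercise against fakes. A compact sketch of the same seam; RecoverySpi, SimpleRecovery, and RecoverySpiDemo are hypothetical names.

// A narrow callback interface in the spirit of Recovery.SPI above.
interface RecoverySpi {
  long currentLogVersion();

  void forceEverything();
}

class SimpleRecovery {
  private final RecoverySpi spi;

  SimpleRecovery(RecoverySpi spi) {
    this.spi = spi;
  }

  void run() {
    System.out.println("recovering from log version " + spi.currentLogVersion());
    spi.forceEverything(); // flush stores once replay is done
  }
}

public class RecoverySpiDemo {
  public static void main(String[] args) {
    // In a test the SPI can be a trivial fake; in production it delegates to the real stores.
    SimpleRecovery recovery =
        new SimpleRecovery(
            new RecoverySpi() {
              @Override
              public long currentLogVersion() {
                return 42;
              }

              @Override
              public void forceEverything() {
                System.out.println("stores flushed");
              }
            });
    recovery.run();
  }
}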