protected void createLiveConfig(NodeManager nodeManager, int liveNode, int... otherLiveNodes)
    throws Exception {
  TransportConfiguration liveConnector =
      createTransportConfiguration(isNetty(), false, generateParams(liveNode, isNetty()));
  Configuration config0 = super.createDefaultConfig();
  config0.getAcceptorConfigurations().clear();
  config0
      .getAcceptorConfigurations()
      .add(createTransportConfiguration(isNetty(), true, generateParams(liveNode, isNetty())));
  config0.setSecurityEnabled(false);
  config0.setSharedStore(true);

  List<String> pairs = new ArrayList<String>();
  for (int node : otherLiveNodes) {
    TransportConfiguration otherLiveConnector =
        createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
    config0.getConnectorConfigurations().put(otherLiveConnector.getName(), otherLiveConnector);
    pairs.add(otherLiveConnector.getName());
  }
  basicClusterConnectionConfig(config0, liveConnector.getName(), pairs);
  config0.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);

  config0.setBindingsDirectory(config0.getBindingsDirectory() + "_" + liveNode);
  config0.setJournalDirectory(config0.getJournalDirectory() + "_" + liveNode);
  config0.setPagingDirectory(config0.getPagingDirectory() + "_" + liveNode);
  config0.setLargeMessagesDirectory(config0.getLargeMessagesDirectory() + "_" + liveNode);

  servers.put(
      liveNode,
      new SameProcessHornetQServer(
          createInVMFailoverServer(true, config0, nodeManager, liveNode)));
}
protected static Configuration createDefaultConfig(
    final Map<String, Object> params, final String... acceptors) {
  Configuration configuration = new ConfigurationImpl();
  configuration.setSecurityEnabled(false);
  configuration.setJMXManagementEnabled(false);
  configuration.setBindingsDirectory(getBindingsDir());
  configuration.setJournalMinFiles(2);
  configuration.setJournalDirectory(getJournalDir());
  configuration.setJournalFileSize(100 * 1024);
  configuration.setPagingDirectory(getPageDir());
  configuration.setLargeMessagesDirectory(getLargeMessagesDir());
  configuration.setJournalCompactMinFiles(0);
  configuration.setJournalCompactPercentage(0);
  configuration.setFileDeploymentEnabled(false);
  configuration.setJournalType(getDefaultJournalType());

  configuration.getAcceptorConfigurations().clear();
  for (String acceptor : acceptors) {
    TransportConfiguration transportConfig = new TransportConfiguration(acceptor, params);
    configuration.getAcceptorConfigurations().add(transportConfig);
  }

  return configuration;
}
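/*
 * Usage sketch (not part of the original class): builds a default configuration with the helper
 * above, using a single in-VM acceptor, and boots an embedded broker from it. EmbeddedHornetQ,
 * InVMAcceptorFactory and the setConfiguration()/start() calls mirror the contextInitialized()
 * method further down; the method name startDefaultInVMServerExample is hypothetical.
 */
protected static EmbeddedHornetQ startDefaultInVMServerExample() throws Exception {
  // one in-VM acceptor, no extra transport params
  Configuration conf =
      createDefaultConfig(new HashMap<String, Object>(), InVMAcceptorFactory.class.getName());
  EmbeddedHornetQ embedded = new EmbeddedHornetQ();
  embedded.setConfiguration(conf);
  embedded.start();
  return embedded;
}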
protected void createReplicatedConfigs() throws Exception {
  final TransportConfiguration liveConnector = getConnectorTransportConfiguration(true);
  final TransportConfiguration backupConnector = getConnectorTransportConfiguration(false);
  final TransportConfiguration backupAcceptor = getAcceptorTransportConfiguration(false);

  nodeManager = new InVMNodeManager();
  backupConfig = createDefaultConfig();
  liveConfig = createDefaultConfig();

  ReplicatedBackupUtils.configureReplicationPair(
      backupConfig, backupConnector, backupAcceptor, liveConfig, liveConnector);

  final String suffix = "_backup";
  backupConfig.setBindingsDirectory(backupConfig.getBindingsDirectory() + suffix);
  backupConfig.setJournalDirectory(backupConfig.getJournalDirectory() + suffix);
  backupConfig.setPagingDirectory(backupConfig.getPagingDirectory() + suffix);
  backupConfig.setLargeMessagesDirectory(backupConfig.getLargeMessagesDirectory() + suffix);
  backupConfig.setSecurityEnabled(false);

  backupServer = createTestableServer(backupConfig);

  liveConfig.getAcceptorConfigurations().clear();
  liveConfig.getAcceptorConfigurations().add(getAcceptorTransportConfiguration(true));

  liveServer = createTestableServer(liveConfig);
}
private void configureLocations(String dataDirectory) {
  if (dataDirectory == null || !new File(dataDirectory).exists()) {
    throw new HornetNestException("Must provide data directory that exists");
  }
  config.setBindingsDirectory(dataDirectory + "/bindings");
  config.setJournalDirectory(dataDirectory + "/journal");
  config.setLargeMessagesDirectory(dataDirectory + "/largemessages");
  config.setPagingDirectory(dataDirectory + "/paging");
}
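/*
 * Usage sketch (the path and method name are hypothetical; configureLocations() rejects
 * directories that do not exist): all four persistence stores end up side by side under a
 * single data root, i.e. <dataRoot>/bindings, <dataRoot>/journal, <dataRoot>/largemessages
 * and <dataRoot>/paging.
 */
private void configureLocationsExample() {
  File dataRoot = new File(System.getProperty("java.io.tmpdir"), "hornetnest-data");
  dataRoot.mkdirs(); // must exist before configureLocations() is called
  configureLocations(dataRoot.getAbsolutePath());
}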
protected void createBackupConfig(
    NodeManager nodeManager,
    int liveNode,
    int nodeid,
    boolean createClusterConnections,
    int[] otherBackupNodes,
    int... otherClusterNodes)
    throws Exception {
  Configuration config1 = super.createDefaultConfig();
  config1.getAcceptorConfigurations().clear();
  config1
      .getAcceptorConfigurations()
      .add(createTransportConfiguration(isNetty(), true, generateParams(nodeid, isNetty())));
  config1.setSecurityEnabled(false);
  config1.setSharedStore(true);
  config1.setBackup(true);

  List<String> staticConnectors = new ArrayList<String>();
  for (int node : otherBackupNodes) {
    TransportConfiguration liveConnector =
        createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
    config1.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);
    staticConnectors.add(liveConnector.getName());
  }

  TransportConfiguration backupConnector =
      createTransportConfiguration(isNetty(), false, generateParams(nodeid, isNetty()));
  config1.getConnectorConfigurations().put(backupConnector.getName(), backupConnector);

  List<String> clusterNodes = new ArrayList<String>();
  for (int node : otherClusterNodes) {
    TransportConfiguration connector =
        createTransportConfiguration(isNetty(), false, generateParams(node, isNetty()));
    config1.getConnectorConfigurations().put(connector.getName(), connector);
    clusterNodes.add(connector.getName());
  }
  basicClusterConnectionConfig(config1, backupConnector.getName(), clusterNodes);

  // Shared store: the backup deliberately reuses the live node's directories (suffix liveNode,
  // not nodeid), so both servers point at the same journal, bindings, paging and large-message
  // data.
  config1.setBindingsDirectory(config1.getBindingsDirectory() + "_" + liveNode);
  config1.setJournalDirectory(config1.getJournalDirectory() + "_" + liveNode);
  config1.setPagingDirectory(config1.getPagingDirectory() + "_" + liveNode);
  config1.setLargeMessagesDirectory(config1.getLargeMessagesDirectory() + "_" + liveNode);

  servers.put(
      nodeid,
      new SameProcessHornetQServer(
          createInVMFailoverServer(true, config1, nodeManager, liveNode)));
}
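/*
 * Sketch of how the two helpers above are typically paired (node ids are illustrative and the
 * method name is hypothetical): one shared NodeManager, live node 0 clustered with live nodes
 * 1 and 2, and a backup with id 3 guarding node 0's shared store.
 */
protected void setupSharedStorePairExample() throws Exception {
  NodeManager nodeManager = new InVMNodeManager();
  createLiveConfig(nodeManager, 0, 1, 2);
  createBackupConfig(nodeManager, 0, 3, true, new int[] {0}, 1, 2);
}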
private void setupServer(boolean backup, String... interceptors) throws Exception {
  final TransportConfiguration liveConnector =
      TransportConfigurationUtils.getInVMConnector(true);
  final TransportConfiguration backupConnector =
      TransportConfigurationUtils.getInVMConnector(false);
  final TransportConfiguration backupAcceptor =
      TransportConfigurationUtils.getInVMAcceptor(false);

  Configuration backupConfig = createDefaultConfig();
  Configuration liveConfig = createDefaultConfig();

  backupConfig.setBackup(backup);

  final String suffix = "_backup";
  backupConfig.setBindingsDirectory(backupConfig.getBindingsDirectory() + suffix);
  backupConfig.setJournalDirectory(backupConfig.getJournalDirectory() + suffix);
  backupConfig.setPagingDirectory(backupConfig.getPagingDirectory() + suffix);
  backupConfig.setLargeMessagesDirectory(backupConfig.getLargeMessagesDirectory() + suffix);

  if (interceptors.length > 0) {
    List<String> interceptorsList = Arrays.asList(interceptors);
    backupConfig.setIncomingInterceptorClassNames(interceptorsList);
  }

  ReplicatedBackupUtils.configureReplicationPair(
      backupConfig, backupConnector, backupAcceptor, liveConfig, liveConnector);

  if (backup) {
    liveServer = createServer(liveConfig);
    liveServer.start();
    waitForComponent(liveServer);
  }

  backupServer = createServer(backupConfig);
  locator = createInVMNonHALocator();
  backupServer.start();

  if (backup) {
    ServiceTestBase.waitForRemoteBackup(null, 5, true, backupServer);
  }

  int count = 0;
  waitForReplication(count);
}
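/*
 * Typical invocations of setupServer() (the interceptor class name below is illustrative only):
 *   setupServer(false);  // single live broker, no replication pairing
 *   setupServer(true);   // live + replicated backup, waits for the backup to announce itself
 *   setupServer(true, MyReplicationInterceptor.class.getName());  // hypothetical interceptor
 *                                                                 // installed on the backup
 */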
private Configuration getConfiguration(
    String identity,
    BackupStrategy backupStrategy,
    TransportConfiguration liveConnector,
    TransportConfiguration liveAcceptor,
    TransportConfiguration... otherLiveNodes)
    throws Exception {
  Configuration configuration = createDefaultConfig();
  configuration.getAcceptorConfigurations().clear();
  configuration.getAcceptorConfigurations().add(liveAcceptor);
  configuration.getConnectorConfigurations().put(liveConnector.getName(), liveConnector);

  configuration.setJournalDirectory(configuration.getJournalDirectory() + identity);
  configuration.setBindingsDirectory(configuration.getBindingsDirectory() + identity);
  configuration.setLargeMessagesDirectory(configuration.getLargeMessagesDirectory() + identity);
  configuration.setPagingDirectory(configuration.getPagingDirectory() + identity);

  List<String> transportConfigurationList = new ArrayList<>();
  final HAPolicy haPolicy = new HAPolicy();
  for (TransportConfiguration otherLiveNode : otherLiveNodes) {
    configuration.getConnectorConfigurations().put(otherLiveNode.getName(), otherLiveNode);
    transportConfigurationList.add(otherLiveNode.getName());
    haPolicy.getRemoteConnectors().add(otherLiveNode.getName());
  }
  basicClusterConnectionConfig(
      configuration, liveConnector.getName(), transportConfigurationList);

  configuration
      .getQueueConfigurations()
      .add(new CoreQueueConfiguration("jms.queue.testQueue", "jms.queue.testQueue", null, true));

  haPolicy.setPolicyType(policyType);
  haPolicy.setBackupStrategy(backupStrategy);
  haPolicy.setBackupPortOffset(100);
  haPolicy.setBackupRequestRetries(-1);
  haPolicy.setBackupRequestRetryInterval(500);
  haPolicy.setMaxBackups(1);
  haPolicy.setRequestBackup(true);
  configuration.setHAPolicy(haPolicy);

  return configuration;
}
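/*
 * Usage sketch (hedged: the "_node0" identity, the BackupStrategy.FULL value and the
 * getInVMAcceptor(true) variant are assumptions; the in-VM connector/acceptor helpers are the
 * same TransportConfigurationUtils methods used in setupServer() above). Connectors for the
 * other cluster members, if any, would be appended as the trailing varargs.
 */
private Configuration getConfigurationExample() throws Exception {
  return getConfiguration(
      "_node0",
      BackupStrategy.FULL,
      TransportConfigurationUtils.getInVMConnector(true),
      TransportConfigurationUtils.getInVMAcceptor(true));
}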
public void contextInitialized(Injector injector) {
  org.candlepin.common.config.Configuration candlepinConfig =
      injector.getInstance(org.candlepin.common.config.Configuration.class);

  if (hornetqServer == null) {
    Configuration config = new ConfigurationImpl();

    HashSet<TransportConfiguration> transports = new HashSet<TransportConfiguration>();
    transports.add(new TransportConfiguration(InVMAcceptorFactory.class.getName()));
    config.setAcceptorConfigurations(transports);

    // alter the default pass to silence log output
    config.setClusterUser(null);
    config.setClusterPassword(null);

    // in vm, who needs security?
    config.setSecurityEnabled(false);

    config.setJournalType(JournalType.NIO);
    config.setCreateBindingsDir(true);
    config.setCreateJournalDir(true);

    String baseDir = candlepinConfig.getString(ConfigProperties.HORNETQ_BASE_DIR);
    config.setBindingsDirectory(new File(baseDir, "bindings").toString());
    config.setJournalDirectory(new File(baseDir, "journal").toString());
    config.setLargeMessagesDirectory(new File(baseDir, "largemsgs").toString());
    config.setPagingDirectory(new File(baseDir, "paging").toString());

    Map<String, AddressSettings> settings = new HashMap<String, AddressSettings>();
    AddressSettings pagingConfig = new AddressSettings();

    String addressPolicyString =
        candlepinConfig.getString(ConfigProperties.HORNETQ_ADDRESS_FULL_POLICY);
    long maxQueueSizeInMb = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_QUEUE_SIZE);
    long maxPageSizeInMb = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_PAGE_SIZE);

    AddressFullMessagePolicy addressPolicy = null;
    if (addressPolicyString.equals("PAGE")) {
      addressPolicy = AddressFullMessagePolicy.PAGE;
    } else if (addressPolicyString.equals("BLOCK")) {
      addressPolicy = AddressFullMessagePolicy.BLOCK;
    } else {
      throw new IllegalArgumentException(
          "Unknown HORNETQ_ADDRESS_FULL_POLICY: "
              + addressPolicyString
              + ". Please use one of: PAGE, BLOCK");
    }

    // Paging sizes need to be converted to bytes
    pagingConfig.setMaxSizeBytes(maxQueueSizeInMb * FileUtils.ONE_MB);
    if (addressPolicy == AddressFullMessagePolicy.PAGE) {
      pagingConfig.setPageSizeBytes(maxPageSizeInMb * FileUtils.ONE_MB);
    }
    pagingConfig.setAddressFullMessagePolicy(addressPolicy);

    // Enable for all the queues
    settings.put("#", pagingConfig);
    config.setAddressesSettings(settings);

    int maxScheduledThreads =
        candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_SCHEDULED_THREADS);
    int maxThreads = candlepinConfig.getInt(ConfigProperties.HORNETQ_MAX_THREADS);
    if (maxThreads != -1) {
      config.setThreadPoolMaxSize(maxThreads);
    }
    if (maxScheduledThreads != -1) {
      config.setScheduledThreadPoolMaxSize(maxScheduledThreads);
    }

    /*
     * Anything up to LARGE_MSG_SIZE may need to be written to the journal, so the journal
     * buffer size must be set accordingly.
     *
     * If the buffer size were smaller than LARGE_MSG_SIZE we could get exceptions such as:
     * "Can't write records bigger than the bufferSize(XXXYYY) on the journal"
     */
    int largeMsgSize = candlepinConfig.getInt(ConfigProperties.HORNETQ_LARGE_MSG_SIZE);
    config.setJournalBufferSize_AIO(largeMsgSize);
    config.setJournalBufferSize_NIO(largeMsgSize);

    hornetqServer = new EmbeddedHornetQ();
    hornetqServer.setConfiguration(config);
  }

  try {
    hornetqServer.start();
    log.info("Hornetq server started");
  } catch (Exception e) {
    log.error("Failed to start hornetq message server:", e);
    throw new RuntimeException(e);
  }

  setupAmqp(injector, candlepinConfig);
  cleanupOldQueues();

  List<String> listeners = getHornetqListeners(candlepinConfig);

  eventSource = injector.getInstance(EventSource.class);
  for (int i = 0; i < listeners.size(); i++) {
    try {
      Class<?> clazz = this.getClass().getClassLoader().loadClass(listeners.get(i));
      eventSource.registerListener((EventListener) injector.getInstance(clazz));
    } catch (Exception e) {
      log.warn("Unable to register listener " + listeners.get(i), e);
    }
  }

  // Initialize the Event sink AFTER the internal server has been created and started.
  EventSink sink = injector.getInstance(EventSink.class);
  try {
    sink.initialize();
  } catch (Exception e) {
    log.error("Failed to initialize EventSink:", e);
    throw new RuntimeException(e);
  }
}
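/*
 * Worked example for the sizing above (values are illustrative, not Candlepin defaults): with
 * HORNETQ_MAX_QUEUE_SIZE = 10 and HORNETQ_MAX_PAGE_SIZE = 1, the address settings become
 * maxSizeBytes = 10 * 1,048,576 = 10,485,760 bytes and pageSizeBytes = 1,048,576 bytes, while
 * the journal buffers (AIO and NIO) are set to exactly HORNETQ_LARGE_MSG_SIZE bytes.
 */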
/** @throws Exception */
protected void startServers() throws Exception {
  NodeManager nodeManager = new InVMNodeManager(false);

  backuptc = new TransportConfiguration(INVM_CONNECTOR_FACTORY, backupParams);
  livetc = new TransportConfiguration(INVM_CONNECTOR_FACTORY);
  liveAcceptortc = new TransportConfiguration(INVM_ACCEPTOR_FACTORY);
  backupAcceptortc = new TransportConfiguration(INVM_ACCEPTOR_FACTORY, backupParams);

  backupConf = createBasicConfig(0);
  backupConf.getAcceptorConfigurations().add(backupAcceptortc);
  backupConf.getConnectorConfigurations().put(livetc.getName(), livetc);
  backupConf.getConnectorConfigurations().put(backuptc.getName(), backuptc);
  basicClusterConnectionConfig(backupConf, backuptc.getName(), livetc.getName());
  backupConf.setSecurityEnabled(false);
  backupConf.setJournalType(getDefaultJournalType());
  backupParams.put(TransportConstants.SERVER_ID_PROP_NAME, 1);
  backupConf
      .getAcceptorConfigurations()
      .add(new TransportConfiguration(INVM_ACCEPTOR_FACTORY, backupParams));
  backupConf.setBackup(true);
  backupConf.setSharedStore(true);
  backupConf.setBindingsDirectory(getBindingsDir());
  backupConf.setJournalMinFiles(2);
  backupConf.setJournalDirectory(getJournalDir());
  backupConf.setPagingDirectory(getPageDir());
  backupConf.setLargeMessagesDirectory(getLargeMessagesDir());
  backupConf.setPersistenceEnabled(true);

  backupService = new InVMNodeManagerServer(backupConf, nodeManager);
  backupJMSService = new JMSServerManagerImpl(backupService);
  backupJMSService.setContext(ctx2);
  backupJMSService.getHornetQServer().setIdentity("JMSBackup");
  log.info("Starting backup");
  backupJMSService.start();

  liveConf = createBasicConfig(0);
  liveConf.setJournalDirectory(getJournalDir());
  liveConf.setBindingsDirectory(getBindingsDir());
  liveConf.setSecurityEnabled(false);
  liveConf.getAcceptorConfigurations().add(liveAcceptortc);
  basicClusterConnectionConfig(liveConf, livetc.getName());
  liveConf.setSharedStore(true);
  liveConf.setJournalType(getDefaultJournalType());
  liveConf.setBindingsDirectory(getBindingsDir());
  liveConf.setJournalMinFiles(2);
  liveConf.setJournalDirectory(getJournalDir());
  liveConf.setPagingDirectory(getPageDir());
  liveConf.setLargeMessagesDirectory(getLargeMessagesDir());
  liveConf.getConnectorConfigurations().put(livetc.getName(), livetc);
  liveConf.setPersistenceEnabled(true);

  liveService = new InVMNodeManagerServer(liveConf, nodeManager);
  liveJMSService = new JMSServerManagerImpl(liveService);
  liveJMSService.setContext(ctx1);
  liveJMSService.getHornetQServer().setIdentity("JMSLive");
  log.info("Starting live");
  liveJMSService.start();

  JMSUtil.waitForServer(backupService);
}