SocketParams() {
  TCProperties props = TCPropertiesImpl.getProperties().getPropertiesFor(PREFIX);

  this.recvBuffer = props.getInt(RECV_BUFFER, -1);
  this.sendBuffer = props.getInt(SEND_BUFFER, -1);
  this.keepAlive = props.getBoolean(KEEP_ALIVE);
  this.tcpNoDelay = props.getBoolean(TCP_NO_DELAY);
}
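SocketParams only reads the tuning values from tc.properties. Purely as a hedged illustration of how values like these are typically applied to a java.net.Socket (the SocketTuning class, the applyTo method, and the "treat -1 as unset" convention are assumptions for this sketch, not part of the original code):

// Illustrative only: applying buffer/keep-alive/no-delay values to a plain java.net.Socket.
import java.io.IOException;
import java.net.Socket;

final class SocketTuning {
  static void applyTo(Socket socket, int recvBuffer, int sendBuffer,
                      boolean keepAlive, boolean tcpNoDelay) throws IOException {
    if (recvBuffer > 0) {
      socket.setReceiveBufferSize(recvBuffer); // assumed: -1 means "leave the OS default"
    }
    if (sendBuffer > 0) {
      socket.setSendBufferSize(sendBuffer);
    }
    socket.setKeepAlive(keepAlive);
    socket.setTcpNoDelay(tcpNoDelay);
  }
}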
@Override
protected String getProperty(String key) {
  if (key.startsWith(TCPropertiesImpl.SYSTEM_PROP_PREFIX)) {
    key = key.substring(TCPropertiesImpl.SYSTEM_PROP_PREFIX.length());
    return TCPropertiesImpl.getProperties().getProperty(key, true);
  } else {
    return System.getProperty(key);
  }
}
protected void checkClientServerVersionCompatibility(String serverVersion) {
  final boolean check = TCPropertiesImpl.getProperties()
      .getBoolean(TCPropertiesConsts.VERSION_COMPATIBILITY_CHECK);
  if (check && !new VersionCompatibility()
      .isCompatibleClientServer(new Version(clientVersion), new Version(serverVersion))) {
    final String msg = "Client/Server versions are not compatible: Client Version: " + clientVersion
                       + ", Server Version: " + serverVersion + ". Terminating client now.";
    CONSOLE_LOGGER.error(msg);
    throw new IllegalStateException(msg);
  }
}
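The real compatibility rule lives in VersionCompatibility, which is not shown here. Purely as an illustration of what such a predicate can look like, a minimal major-version check (SimpleVersionCompatibility is a hypothetical name and the rule is an assumption, not the shipped policy):

// Illustrative only: NOT the shipped VersionCompatibility policy.
// Treats client and server as compatible when their major versions match.
final class SimpleVersionCompatibility {
  boolean isCompatibleClientServer(String clientVersion, String serverVersion) {
    return majorOf(clientVersion) == majorOf(serverVersion);
  }

  private static int majorOf(String version) {
    // "4.3.1" -> 4; unparsable input yields -1, which never matches a real major version
    int dot = version.indexOf('.');
    String major = dot >= 0 ? version.substring(0, dot) : version;
    try {
      return Integer.parseInt(major.trim());
    } catch (NumberFormatException e) {
      return -1;
    }
  }
}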
public class ServerLockFactoryImpl implements LockFactory {
  private static final boolean GREEDY_LOCKS_ENABLED = TCPropertiesImpl.getProperties()
      .getBoolean(TCPropertiesConsts.L2_LOCKMANAGER_GREEDY_LOCKS_ENABLED);

  private final LockFactory factory;

  public ServerLockFactoryImpl() {
    if (GREEDY_LOCKS_ENABLED) {
      factory = new GreedyPolicyFactory();
    } else {
      factory = new NonGreedyLockPolicyFactory();
    }
  }

  @Override
  public ServerLock createLock(LockID lid) {
    return factory.createLock(lid);
  }
}
@Override
public boolean isLegacyProductionModeEnabled() {
  return TCPropertiesImpl.getProperties()
      .getBoolean(TCPropertiesConsts.L2_ENABLE_LEGACY_PRODUCTION_MODE);
}
@Override
public String getTCProperties() {
  Properties props = TCPropertiesImpl.getProperties().addAllPropertiesTo(new Properties());
  String keyPrefix = /* TCPropertiesImpl.SYSTEM_PROP_PREFIX */ null;
  return format(props, keyPrefix);
}
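The format helper called above is not shown. A possible shape for it, assuming it renders one key=value pair per line with an optional display prefix (PropertiesFormatter and its behavior are assumptions for illustration only):

// Hypothetical sketch of a format(Properties, String) helper; not the real implementation.
import java.util.Properties;
import java.util.TreeSet;

final class PropertiesFormatter {
  static String format(Properties props, String keyPrefix) {
    StringBuilder sb = new StringBuilder();
    // TreeSet gives a stable, sorted listing of the property names
    for (String key : new TreeSet<String>(props.stringPropertyNames())) {
      String displayKey = keyPrefix != null ? keyPrefix + key : key;
      sb.append(displayKey).append(" = ").append(props.getProperty(key)).append('\n');
    }
    return sb.toString();
  }
}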
public class LockResponseContextFactory {
  private static final int LOCK_LEASE_TIME = TCPropertiesImpl.getProperties()
      .getInt(TCPropertiesConsts.L2_LOCKMANAGER_GREEDY_LEASE_LEASETIME_INMILLS);
  private static final boolean LOCK_LEASE_ENABLE = TCPropertiesImpl.getProperties()
      .getBoolean(TCPropertiesConsts.L2_LOCKMANAGER_GREEDY_LEASE_ENABLED);

  public static LockResponseContext createLockRejectedResponseContext(
      final LockID lockID, final NodeID nodeID, final ThreadID threadID, final ServerLockLevel level) {
    return new LockResponseContext(lockID, nodeID, threadID, level, LockResponseContext.LOCK_NOT_AWARDED);
  }

  public static LockResponseContext createLockAwardResponseContext(
      final LockID lockID, final NodeID nodeID, final ThreadID threadID, final ServerLockLevel level) {
    LockResponseContext lrc = new LockResponseContext(lockID, nodeID, threadID, level,
                                                      LockResponseContext.LOCK_AWARD);
    return lrc;
  }

  public static LockResponseContext createLockRecallResponseContext(
      final LockID lockID, final NodeID nodeID, final ThreadID threadID, final ServerLockLevel level) {
    if (LOCK_LEASE_ENABLE) {
      return new LockResponseContext(lockID, nodeID, threadID, level, LockResponseContext.LOCK_RECALL,
                                     LOCK_LEASE_TIME);
    } else {
      return new LockResponseContext(lockID, nodeID, threadID, level, LockResponseContext.LOCK_RECALL);
    }
  }

  public static LockResponseContext createLockWaitTimeoutResponseContext(
      final LockID lockID, final NodeID nodeID, final ThreadID threadID, final ServerLockLevel level) {
    return new LockResponseContext(lockID, nodeID, threadID, level, LockResponseContext.LOCK_WAIT_TIMEOUT);
  }

  public static LockResponseContext createLockQueriedResponseContext(
      final LockID lockID, final NodeID nodeID, final ThreadID threadID, final ServerLockLevel level,
      Collection<ClientServerExchangeLockContext> contexts, int numberOfPendingRequests) {
    return new LockResponseContext(lockID, nodeID, threadID, level, contexts, numberOfPendingRequests,
                                   LockResponseContext.LOCK_INFO);
  }
}
void shutdownResources() {
  final TCLogger logger = DSO_LOGGER;

  if (this.counterManager != null) {
    try {
      this.counterManager.shutdown();
    } catch (final Throwable t) {
      logger.error("error shutting down counter manager", t);
    } finally {
      this.counterManager = null;
    }
  }

  if (this.tcMemManager != null) {
    try {
      this.tcMemManager.shutdown();
    } catch (final Throwable t) {
      logger.error("Error stopping memory manager", t);
    } finally {
      this.tcMemManager = null;
    }
  }

  if (this.lockManager != null) {
    try {
      this.lockManager.shutdown(false);
    } catch (final Throwable t) {
      logger.error("Error stopping lock manager", t);
    } finally {
      this.lockManager = null;
    }
  }

  try {
    this.communicationStageManager.stopAll();
  } catch (final Throwable t) {
    logger.error("Error stopping stage manager", t);
  }

  if (this.channel != null) {
    try {
      this.channel.close();
    } catch (final Throwable t) {
      logger.error("Error closing channel", t);
    } finally {
      this.channel = null;
    }
  }

  if (this.communicationsManager != null) {
    try {
      this.communicationsManager.shutdown();
    } catch (final Throwable t) {
      logger.error("Error shutting down communications manager", t);
    } finally {
      this.communicationsManager = null;
    }
  }

  if (taskRunner != null) {
    logger.info("Shutting down TaskRunner");
    taskRunner.shutdown();
  }

  CommonShutDownHook.shutdown();
  this.cluster.shutdown();

  if (this.threadGroup != null) {
    boolean interrupted = false;
    try {
      final long end = System.currentTimeMillis()
                       + TCPropertiesImpl.getProperties()
                           .getLong(TCPropertiesConsts.L1_SHUTDOWN_THREADGROUP_GRACETIME);
      int threadCount = this.threadGroup.activeCount();
      Thread[] t = new Thread[threadCount];
      threadCount = this.threadGroup.enumerate(t);
      final long time = System.currentTimeMillis();
      for (int x = 0; x < threadCount; x++) {
        long start = System.currentTimeMillis();
        while (System.currentTimeMillis() < end && t[x].isAlive()) {
          t[x].join(1000);
        }
        logger.info("Destroyed thread " + t[x].getName() + " time to destroy:"
                    + (System.currentTimeMillis() - start) + " millis");
      }
      logger.info("time to destroy thread group:"
                  + TimeUnit.SECONDS.convert(System.currentTimeMillis() - time, TimeUnit.MILLISECONDS)
                  + " seconds");
      if (this.threadGroup.activeCount() > 0) {
        logger.warn("Timed out waiting for TC thread group threads to die - probable shutdown memory leak\n"
                    + "Live threads: " + getLiveThreads(this.threadGroup));
        Thread threadGroupCleanerThread = new Thread(this.threadGroup.getParent(),
                                                     new TCThreadGroupCleanerRunnable(threadGroup),
                                                     "TCThreadGroup last chance cleaner thread");
        threadGroupCleanerThread.setDaemon(true);
        threadGroupCleanerThread.start();
        logger.warn("Spawning TCThreadGroup last chance cleaner thread");
      } else {
        logger.info("Destroying TC thread group");
        this.threadGroup.destroy();
      }
    } catch (final Throwable t) {
      logger.error("Error destroying TC thread group", t);
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt();
      }
    }
  }

  if (TCPropertiesImpl.getProperties().getBoolean(TCPropertiesConsts.L1_SHUTDOWN_FORCE_FINALIZATION)) {
    System.runFinalization();
  }
}
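The thread-group section above joins each live thread in one-second slices until a shared grace-time deadline passes. The same pattern in isolation, as a sketch (joinAllWithDeadline is a hypothetical helper name, not part of the original class):

// Illustrative: join every thread in a group, but never wait past a shared deadline.
static void joinAllWithDeadline(ThreadGroup group, long graceMillis) throws InterruptedException {
  final long deadline = System.currentTimeMillis() + graceMillis;
  Thread[] threads = new Thread[group.activeCount()];
  int count = group.enumerate(threads);
  for (int i = 0; i < count; i++) {
    // Poll in 1s slices so a hung thread cannot consume more than the remaining grace time.
    while (System.currentTimeMillis() < deadline && threads[i].isAlive()) {
      threads[i].join(1000);
    }
  }
}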
public synchronized void start() {
  validateSecurityConfig();

  final TCProperties tcProperties = TCPropertiesImpl.getProperties();
  final int maxSize = tcProperties.getInt(TCPropertiesConsts.L1_SEDA_STAGE_SINK_CAPACITY);

  final SessionManager sessionManager = new SessionManagerImpl(new SessionManagerImpl.SequenceFactory() {
    @Override
    public Sequence newSequence() {
      return new SimpleSequence();
    }
  });

  this.threadGroup.addCallbackOnExitDefaultHandler(new CallbackOnExitHandler() {
    @Override
    public void callbackOnExit(CallbackOnExitState state) {
      cluster.fireNodeError();
    }
  });

  this.dumpHandler.registerForDump(new CallbackDumpAdapter(this.communicationStageManager));

  final ReconnectConfig l1ReconnectConfig = getReconnectPropertiesFromServer();
  final boolean useOOOLayer = l1ReconnectConfig.getReconnectEnabled();
  final NetworkStackHarnessFactory networkStackHarnessFactory =
      getNetworkStackHarnessFactory(useOOOLayer, l1ReconnectConfig);

  this.counterManager = new CounterManagerImpl();

  final MessageMonitor mm = MessageMonitorImpl.createMonitor(tcProperties, DSO_LOGGER);
  final TCMessageRouter messageRouter = new TCMessageRouterImpl();

  this.communicationsManager = this.clientBuilder.createCommunicationsManager(
      mm,
      messageRouter,
      networkStackHarnessFactory,
      new NullConnectionPolicy(),
      this.connectionComponents.createConnectionInfoConfigItemByGroup().length,
      new HealthCheckerConfigClientImpl(
          tcProperties.getPropertiesFor(TCPropertiesConsts.L1_L2_HEALTH_CHECK_CATEGORY), "DSO Client"),
      getMessageTypeClassMapping(),
      ReconnectionRejectedHandlerL1.SINGLETON,
      securityManager,
      productId);

  DSO_LOGGER.debug("Created CommunicationsManager.");

  final ConnectionInfoConfig[] connectionInfoItems =
      this.connectionComponents.createConnectionInfoConfigItemByGroup();
  final ConnectionInfo[] connectionInfo = connectionInfoItems[0].getConnectionInfos();
  final String serverHost = connectionInfo[0].getHostname();
  final int serverPort = connectionInfo[0].getPort();

  clusterEventsStage = this.communicationStageManager.createStage(
      ClientConfigurationContext.CLUSTER_EVENTS_STAGE, ClusterInternalEventsContext.class,
      new ClusterInternalEventsHandler<ClusterInternalEventsContext>(cluster), 1, maxSize);

  final int socketConnectTimeout = tcProperties.getInt(TCPropertiesConsts.L1_SOCKET_CONNECT_TIMEOUT);
  if (socketConnectTimeout < 0) {
    throw new IllegalArgumentException("invalid socket time value: " + socketConnectTimeout);
  }

  this.channel = this.clientBuilder.createClientMessageChannel(this.communicationsManager,
      this.connectionComponents, sessionManager, MAX_CONNECT_TRIES, socketConnectTimeout, this);

  final ClientIDLoggerProvider cidLoggerProvider = new ClientIDLoggerProvider(this.channel);
  this.communicationStageManager.setLoggerProvider(cidLoggerProvider);

  DSO_LOGGER.debug("Created channel.");

  this.clientEntityManager =
      this.clientBuilder.createClientEntityManager(this.channel, this.communicationStageManager);
  RequestReceiveHandler receivingHandler = new RequestReceiveHandler(this.clientEntityManager);
  Stage<VoltronEntityResponse> entityResponseStage = this.communicationStageManager.createStage(
      ClientConfigurationContext.VOLTRON_ENTITY_RESPONSE_STAGE, VoltronEntityResponse.class,
      receivingHandler, 1, maxSize);
  Stage<Void> serverMessageStage = this.communicationStageManager.createStage(
      ClientConfigurationContext.SERVER_ENTITY_MESSAGE_STAGE, Void.class,
      new ServerMessageReceiveHandler<Void>(channel), 1, maxSize);

  TerracottaOperatorEventLogging.setNodeNameProvider(new ClientNameProvider(this.cluster));

  final SampledRateCounterConfig sampledRateCounterConfig = new SampledRateCounterConfig(1, 300, true);
  this.counterManager.createCounter(sampledRateCounterConfig);
  this.counterManager.createCounter(sampledRateCounterConfig); // for SRA L1 Tx count
  final SampledCounterConfig sampledCounterConfig = new SampledCounterConfig(1, 300, true, 0L);
  this.counterManager.createCounter(sampledCounterConfig);

  this.threadGroup.addCallbackOnExitDefaultHandler(new CallbackDumpAdapter(this.clientEntityManager));
  this.dumpHandler.registerForDump(new CallbackDumpAdapter(this.clientEntityManager));

  final long timeOut = TCPropertiesImpl.getProperties().getLong(TCPropertiesConsts.LOGGING_LONG_GC_THRESHOLD);
  final LongGCLogger gcLogger = this.clientBuilder.createLongGCLogger(timeOut);
  this.tcMemManager.registerForMemoryEvents(gcLogger);
  // CDV-1181 warn if using CMS
  this.tcMemManager.checkGarbageCollectors();

  this.threadIDManager = new ThreadIDManagerImpl(this.threadIDMap);

  // Setup the lock manager
  this.lockManager = this.clientBuilder.createLockManager(
      this.channel,
      new ClientIDLogger(this.channel, TCLogging.getLogger(ClientLockManager.class)),
      sessionManager,
      this.channel.getLockRequestMessageFactory(),
      this.threadIDManager,
      new ClientLockManagerConfigImpl(
          tcProperties.getPropertiesFor(TCPropertiesConsts.L1_LOCK_MANAGER_CATEGORY)),
      this.taskRunner);
  final CallbackDumpAdapter lockDumpAdapter = new CallbackDumpAdapter(this.lockManager);
  this.threadGroup.addCallbackOnExitDefaultHandler(lockDumpAdapter);
  this.dumpHandler.registerForDump(lockDumpAdapter);

  // Create the SEDA stages
  final Stage<Void> lockResponse = this.communicationStageManager.createStage(
      ClientConfigurationContext.LOCK_RESPONSE_STAGE, Void.class,
      new LockResponseHandler<Void>(sessionManager), 1, maxSize);
  final Stage<HydrateContext> hydrateStage = this.communicationStageManager.createStage(
      ClientConfigurationContext.HYDRATE_MESSAGE_STAGE, HydrateContext.class, new HydrateHandler(), 1, maxSize);

  // By design this stage needs to be single threaded. If it wasn't then cluster membership messages could get
  // processed before the client handshake ack, and this client would get a faulty view of the cluster at best,
  // or more likely an AssertionError
  final Stage<PauseContext> pauseStage = this.communicationStageManager.createStage(
      ClientConfigurationContext.CLIENT_COORDINATION_STAGE, PauseContext.class,
      new ClientCoordinationHandler<PauseContext>(), 1, maxSize);
  final Sink<PauseContext> pauseSink = pauseStage.getSink();

  final Stage<Void> clusterMembershipEventStage = this.communicationStageManager.createStage(
      ClientConfigurationContext.CLUSTER_MEMBERSHIP_EVENT_STAGE, Void.class,
      new ClusterMembershipEventsHandler<Void>(cluster), 1, maxSize);

  final List<ClientHandshakeCallback> clientHandshakeCallbacks = new ArrayList<ClientHandshakeCallback>();
  clientHandshakeCallbacks.add(this.lockManager);
  clientHandshakeCallbacks.add(this.clientEntityManager);
  final ProductInfo pInfo = ProductInfo.getInstance();
  this.clientHandshakeManager = this.clientBuilder.createClientHandshakeManager(
      new ClientIDLogger(this.channel, TCLogging.getLogger(ClientHandshakeManagerImpl.class)),
      this.channel.getClientHandshakeMessageFactory(), pauseSink, sessionManager, cluster, this.uuid,
      this.name, pInfo.version(), Collections.unmodifiableCollection(clientHandshakeCallbacks));

  ClientChannelEventController.connectChannelEventListener(channel, pauseSink, clientHandshakeManager);

  this.shutdownManager = new ClientShutdownManager(this, connectionComponents);

  final ClientConfigurationContext cc = new ClientConfigurationContext(this.communicationStageManager,
      this.lockManager, this.clientEntityManager, this.clientHandshakeManager);
  // DO NOT create any stages after this call
  this.communicationStageManager.startAll(cc, Collections.<PostInit>emptyList());

  initChannelMessageRouter(messageRouter, hydrateStage.getSink(), lockResponse.getSink(), pauseSink,
      clusterMembershipEventStage.getSink(), entityResponseStage.getSink(), serverMessageStage.getSink());

  new Thread(threadGroup, new Runnable() {
    public void run() {
      while (!clientStopped.isSet()) {
        try {
          openChannel(serverHost, serverPort);
          waitForHandshake();
          connectionMade();
          break;
        } catch (InterruptedException ie) {
          // We are in the process of letting the thread terminate so we don't handle this in a special way.
        }
      }
      // don't reset interrupted, thread is done
    }
  }, "Connection Establisher - " + uuid).start();
}
@SuppressWarnings("resource")
public void setLogDirectory(File theDirectory, int processType) {
  Assert.assertNotNull(theDirectory);

  if (theDirectory.getName().trim().equalsIgnoreCase("stdout:")
      || theDirectory.getName().trim().equalsIgnoreCase("stderr:")) {
    if (currentLoggingDirectory != null
        && currentLoggingDirectory.getName().trim().equalsIgnoreCase(theDirectory.getName())) {
      // Nothing to do; great!
      return;
    }

    delegateFileAppender.setDelegate(new NullAppender());
    consoleAppender.setLayout(new PatternLayout(CONSOLE_LOGGING_ONLY_PATTERN));
    // Logger.addAppender() doesn't double-add, so this is safe
    Logger.getRootLogger().addAppender(consoleAppender);

    if (buffering) {
      BufferingAppender realBufferingAppender =
          (BufferingAppender) delegateBufferingAppender.setDelegate(new NullAppender());
      realBufferingAppender.stopAndSendContentsTo(consoleAppender);
      realBufferingAppender.close();
      buffering = false;
    }

    boolean stdout = theDirectory.getName().trim().equalsIgnoreCase("stdout:");
    getConsoleLogger().info("All logging information now output to standard "
                            + (stdout ? "output" : "error") + ".");
    return;
  }

  synchronized (TCLoggingLog4J.class) {
    if (currentLoggingDirectory != null) {
      try {
        if (theDirectory.getCanonicalPath().equals(currentLoggingDirectory.getCanonicalPath())) {
          return;
        }
      } catch (IOException ioe) {
        // oh, well -- what can we do? we'll continue on.
      }
    }
  }

  try {
    FileUtils.forceMkdir(theDirectory);
  } catch (IOException ioe) {
    reportLoggingError("We can't create the directory '" + theDirectory.getAbsolutePath()
                       + "' that you specified for your logs.", ioe);
    return;
  }

  if (!theDirectory.canWrite()) {
    // formatting
    reportLoggingError("The log directory, '" + theDirectory.getAbsolutePath() + "', can't be written to.", null);
    return;
  }

  FileLock thisDirectoryLock = null;

  if (!lockingDisabled) {
    File lockFile = new File(theDirectory, LOCK_FILE_NAME);
    try {
      lockFile.createNewFile();
      Assert.eval(lockFile.exists());
      FileChannel channel = new RandomAccessFile(lockFile, "rw").getChannel();
      thisDirectoryLock = channel.tryLock();

      if (thisDirectoryLock == null) {
        reportLoggingError("The log directory, '" + theDirectory.getAbsolutePath()
                           + "', is already in use by another "
                           + "Terracotta process. Logging will proceed to the console only.", null);
        return;
      }
    } catch (OverlappingFileLockException ofle) {
      // This VM already holds the lock; no problem
    } catch (IOException ioe) {
      reportLoggingError("We can't lock the file '" + lockFile.getAbsolutePath()
                         + "', to make sure that only one "
                         + "Terracotta process is using this directory for logging. This may be a permission "
                         + "issue, or some unexpected error. Logging will proceed to the console only.", ioe);
      return;
    }
  }

  RollingFileAppender newFileAppender;

  String logFileName;
  switch (processType) {
    case PROCESS_TYPE_L1:
      logFileName = TERRACOTTA_L1_LOG_FILE_NAME;
      break;
    case PROCESS_TYPE_L2:
      logFileName = TERRACOTTA_L2_LOG_FILE_NAME;
      break;
    case PROCESS_TYPE_GENERIC:
      logFileName = TERRACOTTA_GENERIC_LOG_FILE_NAME;
      break;
    default:
      throw Assert.failure("Unknown process type: " + processType);
  }

  String logFilePath = new File(theDirectory, logFileName).getAbsolutePath();

  synchronized (TCLoggingLog4J.class) {
    try {
      TCProperties props = TCPropertiesImpl.getProperties().getPropertiesFor(TCPropertiesConsts.LOGGING_CATEGORY);
      newFileAppender = new TCRollingFileAppender(new PatternLayout(FILE_AND_JMX_PATTERN), logFilePath, true);
      newFileAppender.setName("file appender");
      int maxLogFileSize = props.getInt(MAX_LOG_FILE_SIZE_PROPERTY, DEFAULT_MAX_LOG_FILE_SIZE);
      newFileAppender.setMaxFileSize(maxLogFileSize + "MB");
      newFileAppender.setMaxBackupIndex(props.getInt(MAX_BACKUPS_PROPERTY, DEFAULT_MAX_BACKUPS));

      // This makes us start with a new file each time.
      newFileAppender.rollOver();

      // Note: order of operations is very important here. We start the new appender before we close and remove
      // the old one so that you don't drop any log records.
      Appender oldFileAppender = delegateFileAppender.setDelegate(newFileAppender);
      if (oldFileAppender != null) {
        oldFileAppender.close();
      }

      if (buffering) {
        BufferingAppender realBufferingAppender =
            (BufferingAppender) delegateBufferingAppender.setDelegate(new NullAppender());
        realBufferingAppender.stopAndSendContentsTo(delegateFileAppender);
        realBufferingAppender.close();
        buffering = false;
      }

      currentLoggingDirectory = theDirectory;

      if (currentLoggingDirectoryFileLock != null) currentLoggingDirectoryFileLock.release();
      currentLoggingDirectoryFileLock = thisDirectoryLock;
    } catch (IOException ioe) {
      reportLoggingError("We were unable to switch the logging system to log to '" + logFilePath + "'.", ioe);
    }
  }

  getConsoleLogger().info("Log file: '" + logFilePath + "'.");

  writeSystemProperties();
}
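The lock-file handling above is the standard FileChannel.tryLock pattern for ensuring a single process writes to a log directory. A self-contained sketch of that pattern using only JDK APIs (DirectoryGuard is a hypothetical name; a null return corresponds to the "already in use by another Terracotta process" branch above):

// Illustrative sketch of the single-writer directory lock used above.
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;

final class DirectoryGuard {
  // Returns a held lock if this process now owns the directory, or null if another
  // process already holds it. The channel is intentionally left open for the lock's lifetime.
  static FileLock tryAcquire(File directory, String lockFileName) throws IOException {
    File lockFile = new File(directory, lockFileName);
    lockFile.createNewFile(); // no-op if the file already exists
    FileChannel channel = new RandomAccessFile(lockFile, "rw").getChannel();
    // tryLock() returns null when another process holds the lock; it throws
    // OverlappingFileLockException when this JVM already holds it (handled by the caller above).
    return channel.tryLock();
  }
}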