@Override public void run() { LOGGER.info("ClusterManager thread starting."); long delay = taskManager.getConfiguration().getNodeRegistrationCycleTime() * 1000L; while (canRun) { OperationResult result = new OperationResult(ClusterManagerThread.class + ".run"); try { checkSystemConfigurationChanged(result); // these checks are separate in order to prevent a failure in one method blocking // execution of others try { checkClusterConfiguration(result); // if error, the scheduler will be stopped nodeRegistrar.updateNodeObject( result); // however, we want to update repo even in that case } catch (Throwable t) { LoggingUtils.logException( LOGGER, "Unexpected exception while checking cluster configuration; continuing execution.", t); } try { checkWaitingTasks(result); } catch (Throwable t) { LoggingUtils.logException( LOGGER, "Unexpected exception while checking waiting tasks; continuing execution.", t); } try { checkStalledTasks(result); } catch (Throwable t) { LoggingUtils.logException( LOGGER, "Unexpected exception while checking stalled tasks; continuing execution.", t); } } catch (Throwable t) { LoggingUtils.logException( LOGGER, "Unexpected exception in ClusterManager thread; continuing execution.", t); } LOGGER.trace("ClusterManager thread sleeping for " + delay + " msec"); try { Thread.sleep(delay); } catch (InterruptedException e) { LOGGER.trace("ClusterManager thread interrupted."); } } LOGGER.info("ClusterManager thread stopping."); }
@Override
public void run() {
    isAppenderThread.set(Boolean.TRUE); // LOG4J2-485
    while (!shutdown) {
        Serializable s;
        try {
            s = queue.take();
            if (s != null && s instanceof String && SHUTDOWN.equals(s.toString())) {
                shutdown = true;
                continue;
            }
        } catch (final InterruptedException ex) {
            break; // LOG4J2-830
        }
        final Log4jLogEvent event = Log4jLogEvent.deserialize(s);
        event.setEndOfBatch(queue.isEmpty());
        final boolean success = callAppenders(event);
        if (!success && errorAppender != null) {
            try {
                errorAppender.callAppender(event);
            } catch (final Exception ex) {
                // Silently accept the error.
            }
        }
    }
    // Process any remaining items in the queue.
    LOGGER.trace("AsyncAppender.AsyncThread shutting down. Processing remaining {} queue events.",
            queue.size());
    int count = 0;
    int ignored = 0;
    while (!queue.isEmpty()) {
        try {
            final Serializable s = queue.take();
            if (Log4jLogEvent.canDeserialize(s)) {
                final Log4jLogEvent event = Log4jLogEvent.deserialize(s);
                event.setEndOfBatch(queue.isEmpty());
                callAppenders(event);
                count++;
            } else {
                ignored++;
                LOGGER.trace("Ignoring event of class {}", s.getClass().getName());
            }
        } catch (final InterruptedException ex) {
            // May have been interrupted to shut down.
            // Here we ignore interrupts and try to process all remaining events.
        }
    }
    LOGGER.trace("AsyncAppender.AsyncThread stopped. Queue has {} events remaining. "
            + "Processed {} and ignored {} events since shutdown started.",
            queue.size(), count, ignored);
}
/**
 * Create the RollingRandomAccessFileManager.
 *
 * @param name The name of the entity to manage.
 * @param data The data required to create the entity.
 * @return a RollingFileManager.
 */
@Override
public RollingRandomAccessFileManager createManager(final String name, final FactoryData data) {
    final File file = new File(name);
    final File parent = file.getParentFile();
    if (null != parent && !parent.exists()) {
        parent.mkdirs();
    }
    if (!data.append) {
        file.delete();
    }
    final long size = data.append ? file.length() : 0;
    final long time = file.exists() ? file.lastModified() : System.currentTimeMillis();
    final boolean writeHeader = !data.append || !file.exists();

    RandomAccessFile raf = null;
    try {
        raf = new RandomAccessFile(name, "rw");
        if (data.append) {
            final long length = raf.length();
            LOGGER.trace("RandomAccessFile {} seek to {}", name, length);
            raf.seek(length);
        } else {
            LOGGER.trace("RandomAccessFile {} set length to 0", name);
            raf.setLength(0);
        }
        return new RollingRandomAccessFileManager(data.getLoggerContext(), raf, name, data.pattern,
                NullOutputStream.getInstance(), data.append, data.immediateFlush, data.bufferSize,
                size, time, data.policy, data.strategy, data.advertiseURI, data.layout, writeHeader);
    } catch (final IOException ex) {
        LOGGER.error("Cannot access RandomAccessFile " + ex, ex);
        if (raf != null) {
            try {
                raf.close();
            } catch (final IOException e) {
                LOGGER.error("Cannot close RandomAccessFile {}", name, e);
            }
        }
    }
    return null;
}
public ZLDBackendState getZLDBackendState(SwordBookMetaData metadata) throws BookException {
    ensureNotShuttingDown();

    ZLDBackendState state = getInstance(metadata);
    if (state == null) {
        LOGGER.trace("Initializing: {}", metadata.getInitials());
        return new ZLDBackendState(metadata);
    }

    LOGGER.trace("Reusing: {}", metadata.getInitials());
    return state;
}
@Override
public void run() {
    while (!shutdown) {
        LogEvent event;
        try {
            event = queue.take();
            if (event == SHUTDOWN) {
                shutdown = true;
                continue;
            }
        } catch (final InterruptedException ex) {
            break; // LOG4J2-830
        }
        event.setEndOfBatch(queue.isEmpty());
        final boolean success = callAppenders(event);
        if (!success && errorAppender != null) {
            try {
                errorAppender.callAppender(event);
            } catch (final Exception ex) {
                // Silently accept the error.
            }
        }
    }
    // Process any remaining items in the queue.
    LOGGER.trace("AsyncAppender.AsyncThread shutting down. Processing remaining {} queue events.",
            queue.size());
    int count = 0;
    int ignored = 0;
    while (!queue.isEmpty()) {
        try {
            final LogEvent event = queue.take();
            if (event instanceof Log4jLogEvent) {
                final Log4jLogEvent logEvent = (Log4jLogEvent) event;
                logEvent.setEndOfBatch(queue.isEmpty());
                callAppenders(logEvent);
                count++;
            } else {
                ignored++;
                LOGGER.trace("Ignoring event of class {}", event.getClass().getName());
            }
        } catch (final InterruptedException ex) {
            // May have been interrupted to shut down.
            // Here we ignore interrupts and try to process all remaining events.
        }
    }
    LOGGER.trace("AsyncAppender.AsyncThread stopped. Queue has {} events remaining. "
            + "Processed {} and ignored {} events since shutdown started.",
            queue.size(), count, ignored);
}
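// A minimal, self-contained sketch of the poison-pill pattern used by the two run() methods above:
// a consumer drains a BlockingQueue until it sees a dedicated shutdown marker, then processes
// whatever is still queued before exiting. Class and method names here are illustrative, not part
// of any library API.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PoisonPillConsumer implements Runnable {

    private static final String SHUTDOWN = "__SHUTDOWN__"; // the poison pill

    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

    public void submit(String item) throws InterruptedException {
        queue.put(item);
    }

    public void shutdown() throws InterruptedException {
        queue.put(SHUTDOWN);
    }

    @Override
    public void run() {
        boolean stopped = false;
        while (!stopped) {
            try {
                String item = queue.take();
                if (SHUTDOWN.equals(item)) {
                    stopped = true;
                    continue;
                }
                process(item);
            } catch (InterruptedException ex) {
                break; // asked to stop while waiting
            }
        }
        // Drain anything queued behind the poison pill without blocking further.
        String remaining;
        while ((remaining = queue.poll()) != null) {
            if (!SHUTDOWN.equals(remaining)) {
                process(remaining);
            }
        }
    }

    private void process(String item) {
        // placeholder for real work
    }
}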
@Override
public void stop() {
    super.stop();
    LOGGER.trace("AsyncAppender stopping. Queue still has {} events.", queue.size());
    thread.shutdown();
    try {
        thread.join();
    } catch (final InterruptedException ex) {
        LOGGER.warn("Interrupted while stopping AsyncAppender {}", getName());
    }
    LOGGER.trace("AsyncAppender stopped. Queue has {} events.", queue.size());
}
@Override
public void innerRun() {
    EventLogger.queueEnd(_event);

    if (_event instanceof LastMessageEvent) {
        LOGGER.trace("messageThread : LastMessageEvent arrived");
        _cell.messageArrived((MessageEvent) _event);
    } else if (_event instanceof RoutedMessageEvent) {
        LOGGER.trace("messageThread : RoutedMessageEvent arrived");
        _cell.messageArrived((RoutedMessageEvent) _event);
    } else if (_event instanceof MessageEvent) {
        MessageEvent msgEvent = (MessageEvent) _event;
        LOGGER.trace("messageThread : MessageEvent arrived");
        CellMessage msg;
        try {
            msg = msgEvent.getMessage().decode();
        } catch (SerializationException e) {
            CellMessage envelope = msgEvent.getMessage();
            LOGGER.error(String.format(
                    "Discarding a malformed message from %s with UOID %s and session [%s]: %s",
                    envelope.getSourcePath(), envelope.getUOID(), envelope.getSession(),
                    e.getMessage()), e);
            return;
        }

        CDC.setMessageContext(msg);
        try {
            LOGGER.trace("messageThread : delivering message: {}", msg);
            _cell.messageArrived(new MessageEvent(msg));
            LOGGER.trace("messageThread : delivering message done: {}", msg);
        } catch (RuntimeException e) {
            if (!msg.isReply()) {
                try {
                    msg.revertDirection();
                    msg.setMessageObject(e);
                    sendMessage(msg);
                } catch (NoRouteToCellException f) {
                    LOGGER.error("PANIC : Problem returning answer: {}", f);
                }
            }
            throw e;
        } finally {
            CDC.clearMessageContext();
        }
    }
}
public void process(HttpRequest request, HttpContext context) throws HttpException, IOException {
    if (!request.containsHeader(ENCODING_HEADER_NAME)) {
        LOGGER.trace("add gzip header.");
        request.addHeader(ENCODING_HEADER_NAME, GZIP_ENCODING_VALUE);
    }
}
public synchronized void send(final BatchEvent events) {
    if (rpcClient == null) {
        rpcClient = connect(agents, retries, connectTimeoutMillis, requestTimeoutMillis);
    }

    if (rpcClient != null) {
        try {
            LOGGER.trace("Sending batch of {} events", events.getEvents().size());
            rpcClient.appendBatch(events.getEvents());
        } catch (final Exception ex) {
            rpcClient.close();
            rpcClient = null;
            final String msg = "Unable to write to " + getName() + " at "
                    + agents[current].getHost() + ':' + agents[current].getPort();
            LOGGER.warn(msg, ex);
            throw new AppenderLoggingException("No Flume agents are available");
        }
    } else {
        final String msg = "Unable to write to " + getName() + " at "
                + agents[current].getHost() + ':' + agents[current].getPort();
        LOGGER.warn(msg);
        throw new AppenderLoggingException("No Flume agents are available");
    }
}
@Override
public void innerRun() {
    try (CDC ignored = _lock.getCdc().restore()) {
        CellMessageAnswerable callback = _lock.getCallback();
        CellMessage answer;
        Object obj;
        try {
            answer = _message.decode();
            obj = answer.getMessageObject();
        } catch (SerializationException e) {
            LOGGER.warn(e.getMessage());
            obj = e;
            answer = null;
        }

        EventLogger.sendEnd(_lock.getMessage());
        if (obj instanceof Exception) {
            callback.exceptionArrived(_lock.getMessage(), (Exception) obj);
        } else {
            callback.answerArrived(_lock.getMessage(), answer);
        }
        LOGGER.trace("addToEventQueue : callback done for : {}", _message);
    }
}
/**
 * Gets the ImageCompareProvider for the given settop.
 *
 * @param settop {@linkplain SettopInfo}
 * @return ImageCompareProvider
 * @throws ProviderCreationException if the provider cannot be created
 */
@Override
public ImageCompareProvider getProvider(SettopInfo settop) throws ProviderCreationException {
    LOGGER.trace("Creating ImageCompareProvider for [" + settop.getHostMacAddress() + "]");
    VideoProvider videoProvider = videoProviderFactory.getProvider(settop);
    ImageCompareProvider icProvider = getProvider(videoProvider);
    return icProvider;
}
public void release(OpenFileState fileState) {
    if (fileState == null) {
        // can't release anything: JSword has failed to open a file state,
        // and a finally block is trying to close this
        return;
    }

    fileState.setLastAccess(System.currentTimeMillis());

    // instead of releasing, we add to our queue
    SwordBookMetaData bmd = fileState.getBookMetaData();
    Queue<OpenFileState> queueForMeta = getQueueForMeta(bmd);
    LOGGER.trace("Offering to release: {}", bmd.getInitials());
    boolean offered = queueForMeta.offer(fileState);

    // release the resources ourselves if we couldn't offer to the queue
    if (!offered) {
        LOGGER.trace("Released: {}", bmd.getInitials());
        fileState.releaseResources();
    }
}
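// A minimal sketch (illustrative names, not the JSword API) of the pooling idea above: instead of
// closing a handle on release, park it in a per-key queue so the next caller can reuse it, and
// only close it when the queue refuses to accept it. A bounded queue is what makes the offer
// failure path meaningful; ConcurrentLinkedQueue is used here only to keep the sketch short.
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

public class HandlePool<K, H extends AutoCloseable> {

    private final Map<K, Queue<H>> pool = new ConcurrentHashMap<>();

    /** Returns a pooled handle for the key, or null if none is available. */
    public H acquire(K key) {
        return pool.computeIfAbsent(key, k -> new ConcurrentLinkedQueue<>()).poll();
    }

    /** Parks the handle for reuse; closes it if the queue will not take it. */
    public void release(K key, H handle) throws Exception {
        Queue<H> queue = pool.computeIfAbsent(key, k -> new ConcurrentLinkedQueue<>());
        if (!queue.offer(handle)) {
            handle.close();
        }
    }
}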
@Override
Source getSource(final URI uri, final AbstractProcessor processor) throws ProcessorException {
    try {
        final File file = getRepositoryFile(uri, processor);
        LOGGER.trace("Repo file: {}", file.getAbsolutePath());
        return new StreamSource(new FileInputStream(file), uri.toString());
    } catch (final FileNotFoundException e) {
        LOGGER.debug("Failed to lookup the file in repository", e);
        LOGGER.warn("Could not resolve {} to repository file", uri);
        throw new ProcessorException(e);
    }
}
@Override
Source getSource(final URI uri, final AbstractProcessor processor) throws ProcessorException {
    final String productId = uri.getHost();
    final String path = uri.getPath();
    final Descriptor descriptor = processor.getFactory().getRepository().getDescriptor(productId);
    try {
        final URI resolvedUri = UriManipulator.resolve(descriptor.getRootUri(), path.substring(1));
        LOGGER.trace("URI {} resolved to {}", uri, resolvedUri);
        return new StreamSource(resolvedUri.toURL().openStream(), uri.toString());
    } catch (final IOException | URISyntaxException e) {
        throw new ProcessorException(e);
    }
}
@Override
public boolean stop(final long timeout, final TimeUnit timeUnit) {
    setStopping();
    super.stop(timeout, timeUnit, false);
    LOGGER.trace("AsyncAppender stopping. Queue still has {} events.", queue.size());
    thread.shutdown();
    try {
        thread.join(shutdownTimeout);
    } catch (final InterruptedException ex) {
        LOGGER.warn("Interrupted while stopping AsyncAppender {}", getName());
    }
    LOGGER.trace("AsyncAppender stopped. Queue has {} events.", queue.size());

    if (DiscardingAsyncQueueFullPolicy.getDiscardCount(asyncQueueFullPolicy) > 0) {
        LOGGER.trace("AsyncAppender: {} discarded {} events.", asyncQueueFullPolicy,
                DiscardingAsyncQueueFullPolicy.getDiscardCount(asyncQueueFullPolicy));
    }
    setStopped();
    return true;
}
/**
 * Maps the attributes.
 *
 * @param source The source object.
 * @param target The target object.
 */
public void mapAttributes(final AttributeSource source, final AttributeSink target) {
    if (source == null) {
        throw new IllegalArgumentException(getClass().getName() + ": source object must not be null.");
    }
    if (target == null) {
        throw new IllegalArgumentException(getClass().getName() + ": target object must not be null.");
    }

    for (final Map.Entry<String, String> entry : this.attributeMap.entrySet()) {
        final String sourceAttr = entry.getKey();
        final String targetAttr = entry.getValue();
        LOGGER.trace("\t'" + sourceAttr + "'--> '" + targetAttr + "'");
        AttributeUtil.copyValues(source, sourceAttr, target, targetAttr);
    }
}
public synchronized ProxyTarget releaseTarget() {
    // When releasing, we unget the service so we don't keep stale references...
    service = null;
    if (delegatingProxy != null) {
        // return the delegate proxy for further usage...
        return delegatingProxy.releaseTarget();
    } else {
        try {
            bundleContext.ungetService(reference);
        } catch (RuntimeException e) {
            // A RuntimeException might occur here; we catch it so it does not prevent
            // any other cleanup actions
            LOGGER.trace("RuntimeException while ungetting service", e);
        }
        return this;
    }
}
@Override
public void viewAccepted(View newView) {
    LOGGER.trace("Members of '{0}' cluster have changed: {1}",
            clusteringConfiguration.getClusterName(), newView);
    if (newView.getMembers().size() > 1) {
        if (multipleAddressesInCluster.compareAndSet(false, true)) {
            LOGGER.debug(
                    "There are now multiple members of cluster '{0}'; changes will be propagated throughout the cluster",
                    clusteringConfiguration.getClusterName());
        }
    } else {
        if (multipleAddressesInCluster.compareAndSet(true, false)) {
            LOGGER.debug(
                    "There is only one member of cluster '{0}'; changes will be propagated locally only",
                    clusteringConfiguration.getClusterName());
        }
    }
}
public HttpObject serverToProxyResponse(HttpObject httpObject) {
    if (httpObject instanceof DefaultHttpResponse) {
        DefaultHttpResponse response = (DefaultHttpResponse) httpObject;
        HttpHeaders headers = response.headers();
        final String uri = this.originalRequest.getUri();
        if (response.getStatus().code() >= 200 && response.getStatus().code() < 300) {
            if (checkForFilters(headers) || checkForFileExtension(uri)) {
                LOGGER.debug("asked for cache for resource {}", uri);
                cacheService.askForCache(uri);
            } else {
                LOGGER.trace("resource {} is NOT going to be cached", uri);
            }
        }
    }
    return httpObject;
}
public static File getHomeDirectory() {
    if (differHome == null) {
        differHome = System.getProperty("user.home");
        differHome += File.separatorChar + ".differ";
        LOGGER.trace("Differ Home Directory: " + differHome);

        // If the home directory doesn't exist, create it
        File differHomeFile = new File(differHome);
        if (!differHomeFile.exists()) {
            differHomeFile.mkdir();
        }

        // Same with the plugins subdirectory
        File differHomeFilePluginDirectory = new File(differHomeFile, "plugins");
        if (!differHomeFilePluginDirectory.exists()) {
            differHomeFilePluginDirectory.mkdir();
        }

        // Same with the users subdirectory
        File differHomeFileUsersDirectory = new File(differHomeFile, "users");
        if (!differHomeFileUsersDirectory.exists()) {
            differHomeFileUsersDirectory.mkdir();
        }

        // Same with the logs subdirectory
        File differHomeLogsDirectory = new File(differHomeFile, "logs");
        if (!differHomeLogsDirectory.exists()) {
            differHomeLogsDirectory.mkdir();
        }
    }

    File homeDir = new File(differHome);
    if (!homeDir.exists()) {
        LOGGER.error("Differ home directory could not be created at " + homeDir.getAbsolutePath());
    }
    return homeDir;
}
@Override
Result getResult(final URI uri, final AbstractProcessor processor) throws ProcessorException {
    final File repoFile = getRepositoryFile(uri, processor);
    LOGGER.trace("Output result file {}", repoFile.getAbsolutePath());
    return new StreamResult(repoFile);
}
/**
 * Called by the server to run the application and begin the session.
 * FIXME: move same parts to firstApplicationStartup()
 */
@Override
public void applicationInit() {
    // Setup Apache Log4j Configuration
    BasicConfigurator.configure();

    // BouncyCastle Setup
    Security.addProvider(new BouncyCastleProvider());

    setTheme(DIFFER_THEME_NAME); // Set to custom differ theme
    LOGGER.trace("Loaded Vaadin theme: " + DIFFER_THEME_NAME);

    // Get Application Context
    WebApplicationContext context = (WebApplicationContext) getContext();

    // Set Context Locale to Browser Locale
    Locale locale = context.getBrowser().getLocale();
    setLocale(locale);
    LOGGER.trace("Session Locale: " + locale.getDisplayName());

    // Add this as a listener to the context transaction event pump
    context.addTransactionListener(this);

    // Load Differ Properties into the JVM
    File differProps = new File(new File(getHomeDirectory(), "resources"), "differ.properties");
    if (differProps.exists() && differProps.canRead()) {
        FileInputStream propStream = null;
        try {
            propStream = new FileInputStream(differProps);
            System.getProperties().load(propStream);
            LOGGER.info("Loaded differ.properties");
        } catch (IOException e) {
            LOGGER.error("Unable to load differ.properties!", e);
        } finally {
            if (propStream != null) {
                try {
                    propStream.close();
                } catch (IOException e) {
                    LOGGER.error("Unable to close differ.properties file stream.", e);
                }
            }
        }
    }

    // Setup Apache Log4j Configuration for file logging if the property is set in differ props
    if (System.getProperty("differ.logging.file") != null
            && System.getProperty("differ.logging.file").equalsIgnoreCase("true")) {
        String fileLocation = System.getProperty("differ.logging.file.location");
        if (fileLocation == null) {
            // Create a logging file in the logs directory that is named with the current timestamp
            Calendar cal = Calendar.getInstance();
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH-mm-ss");
            String fileName = sdf.format(cal.getTime()) + ".log";
            fileLocation = new File(new File(getHomeDirectory(), "logs"), fileName).getAbsolutePath();
        }
        File loggingFile = new File(fileLocation);
        if (loggingFile.exists()) {
            LOGGER.warn("differ.logging.file.location is an invalid location");
        } else {
            try {
                BasicConfigurator.configure(new FileAppender(
                        new PatternLayout(PatternLayout.DEFAULT_CONVERSION_PATTERN),
                        loggingFile.getAbsolutePath()));
            } catch (IOException e) {
                LOGGER.error("Unable to create logging file", e);
            }
        }
    }

    ServletContext servletContext =
            ((WebApplicationContext) this.getContext()).getHttpSession().getServletContext();
    applicationContext = WebApplicationContextUtils.getRequiredWebApplicationContext(servletContext);
    userManager = (UserManager) applicationContext.getBean("userManager");
    imageManager = (ImageManager) applicationContext.getBean("imageManager");
    resultManager = (ResultManager) applicationContext.getBean("resultManager");

    MainDifferWindow mainWindow = new MainDifferWindow();
    mainWindow.setSizeUndefined();
    setMainWindow(mainWindow);
}
private void reduceWordDB() throws LocalDBException {
    if (localDB == null || localDB.status() != LocalDB.Status.OPEN) {
        return;
    }

    final long oldestEntryAge = System.currentTimeMillis() - oldestEntry;
    if (oldestEntryAge < settings.maxAgeMs) {
        LOGGER.debug("skipping wordDB reduce operation, eldestEntry="
                + TimeDuration.asCompactString(oldestEntryAge)
                + ", maxAge=" + TimeDuration.asCompactString(settings.maxAgeMs));
        return;
    }

    final long startTime = System.currentTimeMillis();
    final int initialSize = size();
    int removeCount = 0;
    long localOldestEntry = System.currentTimeMillis();

    LOGGER.debug("beginning wordDB reduce operation, examining " + initialSize
            + " words for entries older than " + TimeDuration.asCompactString(settings.maxAgeMs));

    LocalDB.LocalDBIterator<String> keyIterator = null;
    try {
        keyIterator = localDB.iterator(WORDS_DB);
        while (status == STATUS.OPEN && keyIterator.hasNext()) {
            final String key = keyIterator.next();
            final String value = localDB.get(WORDS_DB, key);
            final long timeStamp = Long.parseLong(value);
            final long entryAge = System.currentTimeMillis() - timeStamp;

            if (entryAge > settings.maxAgeMs) {
                localDB.remove(WORDS_DB, key);
                removeCount++;

                if (removeCount % 1000 == 0) {
                    LOGGER.trace("wordDB reduce operation in progress, removed=" + removeCount
                            + ", total=" + (initialSize - removeCount));
                }
            } else {
                localOldestEntry = timeStamp < localOldestEntry ? timeStamp : localOldestEntry;
            }
            sleeper.sleep();
        }
    } finally {
        try {
            if (keyIterator != null) {
                keyIterator.close();
            }
        } catch (Exception e) {
            LOGGER.warn("error returning LocalDB iterator: " + e.getMessage());
        }
    }

    // update the oldest entry
    if (status == STATUS.OPEN) {
        oldestEntry = localOldestEntry;
        localDB.put(META_DB, KEY_OLDEST_ENTRY, Long.toString(oldestEntry));
    }

    final StringBuilder sb = new StringBuilder();
    sb.append("completed wordDB reduce operation");
    sb.append(", removed=").append(removeCount);
    sb.append(", totalRemaining=").append(size());
    sb.append(", oldestEntry=").append(TimeDuration.asCompactString(oldestEntry));
    sb.append(" in ").append(TimeDuration.asCompactString(System.currentTimeMillis() - startTime));
    LOGGER.debug(sb.toString());
}
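// A minimal sketch (illustrative, not the pwm LocalDB API) of the age-based purge pattern used by
// reduceWordDB() above: walk a key/timestamp store, drop entries older than a maximum age, and
// remember the oldest surviving timestamp so the next run can decide whether a purge is needed.
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class AgeBasedPurge {

    private final Map<String, Long> store = new ConcurrentHashMap<>(); // key -> insert time (ms)
    private volatile long oldestEntry = Long.MAX_VALUE;

    public void put(String key) {
        store.put(key, System.currentTimeMillis());
    }

    /** Removes entries older than maxAgeMs and records the oldest remaining timestamp. */
    public int purge(long maxAgeMs) {
        final long now = System.currentTimeMillis();
        long localOldest = now;
        int removed = 0;
        for (Iterator<Map.Entry<String, Long>> it = store.entrySet().iterator(); it.hasNext();) {
            Map.Entry<String, Long> entry = it.next();
            long age = now - entry.getValue();
            if (age > maxAgeMs) {
                it.remove();
                removed++;
            } else if (entry.getValue() < localOldest) {
                localOldest = entry.getValue();
            }
        }
        oldestEntry = localOldest;
        return removed;
    }
}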