@Override
public synchronized void send(final Event event) {
    if (rpcClient == null) {
        rpcClient = connect(agents, retries, connectTimeoutMillis, requestTimeoutMillis);
    }
    if (rpcClient != null) {
        try {
            rpcClient.append(event);
        } catch (final Exception ex) {
            rpcClient.close();
            rpcClient = null;
            final String msg = "Unable to write to " + getName() + " at "
                    + agents[current].getHost() + ':' + agents[current].getPort();
            LOGGER.warn(msg, ex);
            throw new AppenderLoggingException("No Flume agents are available");
        }
    } else {
        final String msg = "Unable to write to " + getName() + " at "
                + agents[current].getHost() + ':' + agents[current].getPort();
        LOGGER.warn(msg);
        throw new AppenderLoggingException("No Flume agents are available");
    }
}
@Override
public void run() {
    /* Legacy UDP location manager daemon. */
    DatagramPacket packet;
    while (!Thread.currentThread().isInterrupted()) {
        try {
            packet = new DatagramPacket(new byte[1024], 1024);
            socket.receive(packet);
        } catch (SocketException e) {
            if (!Thread.currentThread().isInterrupted()) {
                LOGGER.warn("Exception in Server receive loop (exiting)", e);
            }
            break;
        } catch (Exception ie) {
            LOGGER.warn("Exception in Server receive loop (exiting)", ie);
            break;
        }
        try {
            process(packet);
            socket.send(packet);
        } catch (Exception se) {
            LOGGER.warn("Exception in send", se);
        }
    }
    socket.close();
}
@Override
public String toModelName(final String name) {
    final String sanitizedName = sanitizeName(modelNamePrefix + name + modelNameSuffix);

    // camelize the model name
    // phone_number => PhoneNumber
    final String camelizedName = camelize(sanitizedName);

    // model name cannot use reserved keyword, e.g. return
    if (isReservedWord(camelizedName)) {
        final String modelName = "Model" + camelizedName;
        LOGGER.warn(camelizedName + " (reserved word) cannot be used as model name. Renamed to " + modelName);
        return modelName;
    }

    // model name starts with number
    if (name.matches("^\\d.*")) {
        final String modelName = "Model" + camelizedName; // e.g. 200Response => Model200Response (after camelize)
        LOGGER.warn(name + " (model name starts with number) cannot be used as model name. Renamed to " + modelName);
        return modelName;
    }

    return camelizedName;
}
@Override
public void prepareForRendering(String layer, TileIndex tile, Iterable<TileIndex> tileSet) {
    try {
        TileSerializer<?> serializer = produce(TileSerializer.class);
        String pyramidId = getPropertyValue(LayerConfiguration.LAYER_ID);
        _pyramidIO.requestTiles(pyramidId, serializer, tileSet);
    } catch (IOException e) {
        LOGGER.warn("Error requesting tile set", e);
    } catch (ConfigurationException e) {
        LOGGER.warn("Error requesting tile set", e);
    }
}
private void removeFile(PnfsId pnfsId) {
    try {
        repository.setState(pnfsId, ReplicaState.REMOVED);
    } catch (IllegalTransitionException f) {
        LOGGER.warn("File not found in name space, but failed to remove {}: {}", pnfsId, f.getMessage());
    } catch (CacheException f) {
        LOGGER.error("File not found in name space, but failed to remove {}: {}", pnfsId, f.getMessage());
    } catch (InterruptedException f) {
        LOGGER.warn("File not found in name space, but failed to remove {}: {}", pnfsId, f);
    }
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    Channel channel = new NettyChannel(ctx);
    final String remoteAddress = RemotingHelper.parseChannelRemoteAddr(channel);
    LOGGER.warn("CLIENT : exceptionCaught {}", remoteAddress);
    LOGGER.warn("CLIENT : exceptionCaught exception.", cause);
    closeChannel(channel);
    if (channelEventListener != null) {
        putRemotingEvent(new RemotingEvent(RemotingEventType.EXCEPTION, remoteAddress, channel));
    }
}
/* @see BaseTiffReader#initMetadataStore() */
protected void initMetadataStore() throws FormatException {
    super.initMetadataStore();
    MetadataStore store = makeFilterMetadata();
    MetadataTools.populatePixels(store, this, true);

    if (date != null) {
        date = DateTools.formatDate(date, DATE_FORMAT);
        if (date != null) {
            store.setImageAcquisitionDate(new Timestamp(date), 0);
        }
    }

    if (getMetadataOptions().getMetadataLevel() != MetadataLevel.MINIMUM) {
        store.setImageDescription(MAGIC_STRING, 0);

        if (scaling > 0) {
            store.setPixelsPhysicalSizeX(new PositiveFloat(scaling), 0);
            store.setPixelsPhysicalSizeY(new PositiveFloat(scaling), 0);
        } else {
            LOGGER.warn("Expected positive value for PhysicalSize; got {}", scaling);
        }

        String instrument = MetadataTools.createLSID("Instrument", 0);
        store.setInstrumentID(instrument, 0);
        store.setImageInstrumentRef(instrument, 0);

        store.setObjectiveID(MetadataTools.createLSID("Objective", 0, 0), 0, 0);
        if (magnification > 0) {
            store.setObjectiveNominalMagnification(new PositiveInteger(magnification), 0, 0);
        } else {
            LOGGER.warn("Expected positive value for NominalMagnification; got {}", magnification);
        }
        store.setObjectiveImmersion(getImmersion(immersion), 0, 0);

        String detector = MetadataTools.createLSID("Detector", 0, 0);
        store.setDetectorID(detector, 0, 0);
        store.setDetectorModel(cameraType + " " + cameraName, 0, 0);
        store.setDetectorType(getDetectorType("CCD"), 0, 0);

        for (int i = 0; i < getSizeC(); i++) {
            store.setDetectorSettingsID(detector, 0, i);
            store.setDetectorSettingsBinning(getBinning(binning), 0, i);
        }

        for (int i = 0; i < getImageCount(); i++) {
            int[] zct = getZCTCoords(i);
            store.setPlaneExposureTime(exposureTimes.get(zct[1]) / 1000000, 0, i);
        }
    }
}
/**
 * Removes the timeout role from the given user. This does NOT create or manage any
 * storage/persistence; it only updates the user's roles.
 *
 * @param user The user to remove the timeout role from
 * @param server The server on which to remove the timeout role
 * @param invocationChannel The channel to send error messages to
 */
public boolean removeTimeoutRole(User user, Server server, Channel invocationChannel) {
    String serverId = server.getId();
    TempServerConfig serverConfig = serverStorage.get(serverId);
    if (serverConfig == null) {
        serverConfig = new TempServerConfig(serverId);
        serverStorage.put(serverId, serverConfig);
    }
    ServerTimeoutStorage storage = serverConfig.getServerTimeouts();
    String serverName = server.getName();
    if (storage != null && storage.getTimeoutRoleId() != null) {
        String timeoutRoleId = storage.getTimeoutRoleId();
        Role timeoutRole = apiClient.getRole(timeoutRoleId, server);
        if (timeoutRole != NO_ROLE) {
            // Get roles
            Set<Role> userRoles = apiClient.getMemberRoles(apiClient.getUserMember(user, server), server);
            // Delete the ban role
            LinkedHashSet<String> newRoles = new LinkedHashSet<>(userRoles.size() - 1);
            userRoles.stream()
                    .map(Role::getId)
                    .filter(s -> !timeoutRoleId.equals(s))
                    .forEach(newRoles::add);
            // Update
            apiClient.updateRoles(user, server, newRoles);
            return userRoles.size() == newRoles.size();
        } else {
            LOGGER.warn("Timeout role ID {} for server {} ({}) does not exist",
                    timeoutRoleId, serverName, serverId);
            apiClient.sendMessage(
                    loc.localize("message.mod.timeout.bad_role", timeoutRoleId), invocationChannel);
        }
    } else {
        storage = new ServerTimeoutStorage();
        serverConfig.setServerTimeouts(storage);
        serverStorage.put(serverId, serverConfig);
        LOGGER.warn("Timeout role for server {} ({}) is not configured", serverName, serverId);
        apiClient.sendMessage(loc.localize("message.mod.timeout.not_configured"), invocationChannel);
    }
    return false;
}
private void refreshTimeoutOnEvade(User user, Server server) {
    ServerTimeout timeout = SafeNav.of(serverStorage.get(server.getId()))
            .next(TempServerConfig::getServerTimeouts)
            .next(ServerTimeoutStorage::getTimeouts)
            .next(timeouts -> timeouts.get(user.getId()))
            .get();
    if (timeout == null) {
        LOGGER.warn("Attempted to refresh a timeout on a user who was not timed out! {} ({})",
                user.getUsername(), user.getId());
        return;
    }
    LOGGER.info("User {} ({}) attempted to evade a timeout on {} ({})!",
            user.getUsername(), user.getId(), server.getName(), server.getId());
    Channel channel = apiClient.getChannelById(server.getId(), server);
    apiClient.sendMessage(
            loc.localize("listener.mod.timeout.on_evasion",
                    user.getId(),
                    formatDuration(Duration.between(Instant.now(), timeout.getEndTime())),
                    formatInstant(timeout.getEndTime())),
            channel);
    applyTimeoutRole(user, server, channel);
}
@Override
public void writeRevision(final Revision rev) throws IOException {
    final ParsedPage pp = parser.parse(rev.Text);
    if (pp == null) {
        LOGGER.warn("Could not parse page with title {}", pageTitle);
    } else if (pp.getSections() != null) {
        final Set<String> declinations = getDeclinations(pp.getTemplates());
        if (!declinations.isEmpty()) {
            nounTitles.addAll(declinations);
        }
        for (final Section section : pp.getSections()) {
            final List<Template> partOfSpeechTemplates = getPartOfSpeechTemplates(section);
            if (!partOfSpeechTemplates.isEmpty()) {
                for (final Template template : partOfSpeechTemplates) {
                    if (isNoun.f(getFirstParameter.f(template))) {
                        nounTitles.add(pageTitle);
                        if (declinations.isEmpty() && LOGGER.isDebugEnabled()) {
                            LOGGER.debug("Found no declinations for page {}", pageTitle);
                        }
                    }
                }
                return;
            }
        }
        if (LOGGER.isDebugEnabled() && rev.Text.contains("Substantiv")) {
            LOGGER.debug("No part-of-speech found for {} (which indeed contains 'Substantiv')", pageTitle);
        }
    }
}
@Override
public BigDecimal getPriceAvgBuyFor(
        PortfolioShare portfolioShare, Date currentStartDate, Date currentEndDate, Currency targetCurrency) {
    BigDecimal totalMoneyInvested = BigDecimal.ZERO;
    BigDecimal totalQuantityBought = BigDecimal.ZERO;
    for (TransactionElement te : headTransactionsTo(currentStartDate, currentEndDate)) {
        if (te.transactionType().equals(TransactionType.AIN) && te.getStock().equals(portfolioShare.getStock())) {
            BigDecimal convertedPrice =
                    getCurrencyConverter().convert(te.getCurrency(), targetCurrency, te.getPrice(), te.getDate());
            totalMoneyInvested = totalMoneyInvested.add(
                    convertedPrice.multiply(te.getQuantity()).setScale(10, BigDecimal.ROUND_HALF_EVEN));
            totalQuantityBought = totalQuantityBought.add(te.getQuantity());
        }
    }
    if (totalQuantityBought.compareTo(BigDecimal.ZERO) == 0) {
        LOGGER.warn("getPriceAvgBuyFor: bought transactions sum to zero for " + portfolioShare);
        return BigDecimal.ZERO;
    } else {
        return totalMoneyInvested.divide(totalQuantityBought, 10, BigDecimal.ROUND_HALF_EVEN);
    }
}
private void done(Throwable cause) {
    PnfsId pnfsId = getFileAttributes().getPnfsId();
    if (cause != null) {
        if (cause instanceof InterruptedException || cause instanceof CancellationException) {
            cause = new TimeoutCacheException("Flush was cancelled.", cause);
        }
        if (cause instanceof CacheException) {
            infoMsg.setResult(((CacheException) cause).getRc(), cause.getMessage());
        } else {
            infoMsg.setResult(CacheException.DEFAULT_ERROR_CODE, cause.getMessage());
        }
    }
    infoMsg.setTransferTime(System.currentTimeMillis() - activatedAt);
    infoMsg.setFileSize(getFileAttributes().getSize());
    infoMsg.setTimeQueued(activatedAt - createdAt);

    if (!suppressedStoreErrors.contains(infoMsg.getResultCode())) {
        if (infoMsg.getResultCode() != 0) {
            LOGGER.warn("Flush of {} failed with: {}.", pnfsId, cause.toString());
        }
        billingStub.notify(infoMsg);
    }
    flushRequests.removeAndCallback(pnfsId, cause);
}
@Override
public void configure(Context context) {
    String resolutionsStr = context.getString("resolutions", "month,day,hour,minute,second");
    String[] resolutionsArray = resolutionsStr.split(",");
    for (String resolution : resolutionsArray) {
        if (resolution.trim().equals("month")) {
            resolutions[4] = true;
        } else if (resolution.trim().equals("day")) {
            resolutions[3] = true;
        } else if (resolution.trim().equals("hour")) {
            resolutions[2] = true;
        } else if (resolution.trim().equals("minute")) {
            resolutions[1] = true;
        } else if (resolution.trim().equals("second")) {
            resolutions[0] = true;
        } else {
            LOGGER.warn("[" + this.getName() + "] Unknown resolution " + resolution);
        } // if else
    } // for

    LOGGER.debug("[" + this.getName() + "] Reading configuration (resolutions=" + resolutionsStr + ")");
    super.configure(context);
} // configure
public void failed(Exception cause) {
    if (cause instanceof InterruptedException || cause instanceof CancellationException) {
        cause = new TimeoutCacheException("Stage was cancelled.", cause);
    }
    LOGGER.warn("Remove of {} failed with {}.", uri, cause);
    removeRequests.removeAndCallback(uri, cause);
}
@Override
public void innerRun() {
    try (CDC ignored = _lock.getCdc().restore()) {
        CellMessageAnswerable callback = _lock.getCallback();
        CellMessage answer;
        Object obj;
        try {
            answer = _message.decode();
            obj = answer.getMessageObject();
        } catch (SerializationException e) {
            LOGGER.warn(e.getMessage());
            obj = e;
            answer = null;
        }
        EventLogger.sendEnd(_lock.getMessage());
        if (obj instanceof Exception) {
            callback.exceptionArrived(_lock.getMessage(), (Exception) obj);
        } else {
            callback.answerArrived(_lock.getMessage(), answer);
        }
        LOGGER.trace("addToEventQueue : callback done for : {}", _message);
    }
}
private JSONResult parse(String jsonString) {
    JSONResult ret = new JSONResult();
    JSON json = JSONSerializer.toJSON(jsonString);
    JSONObject jo = (JSONObject) json;
    ret.total = jo.getInt("totalCount");

    Set<String> names;
    JSONArray arrResults = jo.optJSONArray("results");
    if (arrResults != null) {
        names = getArray(arrResults);
    } else {
        JSONObject results = jo.optJSONObject("results");
        if (results != null) {
            names = Collections.singleton(getSingle(results));
        } else {
            LOGGER.warn("No results found");
            names = Collections.emptySet();
        }
    }
    ret.names = names;
    ret.returnedCount = names.size();
    return ret;
}
public void onTimeoutExpire(User user, Server server) {
    String serverId = server.getId();
    TempServerConfig serverConfig = serverStorage.get(serverId);
    if (serverConfig == null) {
        serverConfig = new TempServerConfig(serverId);
        serverStorage.put(serverId, serverConfig);
    }
    ServerTimeoutStorage storage = serverConfig.getServerTimeouts();
    if (storage != null) {
        ServerTimeout timeout = storage.getTimeouts().remove(user.getId());
        if (timeout != null) {
            saveServerConfig(serverConfig);
            LOGGER.info("Expiring timeout for {} ({}) in {} ({})",
                    user.getUsername(), user.getId(), server.getName(), server.getId());
            if (apiClient.getUserById(user.getId(), server) != NO_USER) {
                apiClient.sendMessage(
                        loc.localize("message.mod.timeout.expire", user.getId()), server.getId());
            }
            removeTimeoutRole(user, server, apiClient.getChannelById(server.getId()));
            return;
        }
    }
    LOGGER.warn("Unable to expire: could not find server or timeout entry for {} ({}) in {} ({})",
            user.getUsername(), user.getId(), server.getName(), server.getId());
}
public boolean requiresInitialization(FileLock lock) {
    if (!didRebuild) {
        if (validator != null && !validator.isValid()) {
            LOGGER.debug("Invalidating {} as cache validator returned false.", DefaultPersistentDirectoryCache.this);
            return true;
        }
    }
    if (!lock.getUnlockedCleanly()) {
        if (!lock.getState().isInInitialState()) {
            LOGGER.warn("Invalidating {} as it was not closed cleanly.", DefaultPersistentDirectoryCache.this);
        }
        return true;
    }
    Properties cachedProperties = GUtil.loadProperties(propertiesFile);
    for (Map.Entry<?, ?> entry : properties.entrySet()) {
        String previousValue = cachedProperties.getProperty(entry.getKey().toString());
        String currentValue = entry.getValue().toString();
        // compare from currentValue so a missing cached property (null) does not NPE
        if (!currentValue.equals(previousValue)) {
            LOGGER.debug("Invalidating {} as cache property {} has changed from {} to {}.",
                    DefaultPersistentDirectoryCache.this, entry.getKey(), previousValue, currentValue);
            return true;
        }
    }
    return false;
}
@Override
protected void onSubmit(AjaxRequestTarget target, Form<?> form) {
    try {
        Product product = (Product) form.getDefaultModelObject();
        if (product.getId() == 0) {
            product.setActive(true);
            productDataProvider.persist(product);
        } else {
            productDataProvider.merge(product);
        }
        ProductViewOrEditPanel.this.removeAll();
        ProductViewOrEditPanel.this.add(new ProductViewFragement().setOutputMarkupId(true));
    } catch (RuntimeException e) {
        LOGGER.warn(e.getMessage(), e);
        String[] messages = e.getMessage().split(": ");
        String message = messages[messages.length - 1];
        warn(message.substring(0, 1).toUpperCase() + message.substring(1));
    } finally {
        target.add(target.getPage());
    }
}
/**
 * Constructs a request logging filter wrapper.
 *
 * @param request the request to wrap.
 * @throws IOException if any problems were encountered while reading from the stream.
 */
public RequestLoggingFilterWrapper(HttpServletRequest request) throws IOException {
    // Perform super class processing.
    super(request);

    // Only grab the payload if debugging is enabled. Otherwise, we'd always be pre-reading the
    // entire payload, which would cause a slight performance degradation for no reason.
    if (LOGGER.isDebugEnabled()) {
        // Read the original payload into the payload variable.
        InputStream inputStream = null;
        try {
            // Get the input stream.
            inputStream = request.getInputStream();
            if (inputStream != null) {
                // Read the payload from the input stream.
                payload = IOUtils.toByteArray(inputStream);
            }
        } finally {
            if (inputStream != null) {
                try {
                    inputStream.close();
                } catch (IOException iox) {
                    LOGGER.warn("Unable to close request input stream.", iox);
                }
            }
        }
    }
}
public String ac_whatToDo_$_1(Args args) {
    String domainName = args.argv(0);
    String serial = args.getOpt("serial");
    Map<String, HostAndPort> cores = coreDomains.cores();
    switch (cores.size()) {
        case 0:
            return null;
        case 1:
            String broker = Iterables.get(cores.keySet(), 0);
            return "do" + (serial != null ? " -serial=" + serial : "")
                    + " " + domainName + " " + "nl" + " c:" + broker + " d:" + broker;
        default:
            LOGGER.warn("Legacy domain {} tried to connect, but is not supported in multi-core topologies.",
                    domainName);
            return "do" + (serial != null ? " -serial=" + serial : "") + " " + domainName;
    }
}
@Override
public void run() {
    final Selector selector = this.selector;
    for (;;) {
        ++reactCount;
        try {
            selector.select(1000L);
            register(selector);
            Set<SelectionKey> keys = selector.selectedKeys();
            try {
                for (SelectionKey key : keys) {
                    Object att = key.attachment();
                    // use the logger instead of a leftover System.out.println for attachment tracing
                    LOGGER.debug("attachment {}", att);
                    if (att != null && key.isValid()) {
                        int readyOps = key.readyOps();
                        if ((readyOps & SelectionKey.OP_READ) != 0) {
                            read((NIOConnection) att);
                        } else if ((readyOps & SelectionKey.OP_WRITE) != 0) {
                            write((NIOConnection) att);
                        } else {
                            key.cancel();
                        }
                    } else {
                        key.cancel();
                    }
                }
            } finally {
                keys.clear();
            }
        } catch (Throwable e) {
            LOGGER.warn(name, e);
        }
    }
}
@Override
public void layerRemoved(LayerCollectionEvent e) {
    HashSet<ILayer> newSelection = new HashSet<ILayer>();
    newSelection.addAll(Arrays.asList(selectedLayers));
    ILayer[] affected = e.getAffected();
    for (final ILayer layer : affected) {
        // Check active
        if (activeLayer == layer) {
            setActiveLayer(null);
        }
        // Check selection
        newSelection.remove(layer);
        layer.removeLayerListenerRecursively(openerListener);
        if (isOpen()) {
            try {
                layer.close();
            } catch (LayerException e1) {
                LOGGER.warn(I18N.tr("Cannot close layer {0}", layer.getName()), e1);
            }
        }
    }
    setSelectedLayers(newSelection.toArray(new ILayer[newSelection.size()]));
    // checkIfHasToResetSRID();
}
public void shutdown() {
    try {
        running = false;
        jedis.disconnect();
    } catch (Throwable t) {
        LOGGER.warn(t.getMessage(), t);
    }
}
@Override
public synchronized void send(final Event event) {
    if (batchSize == 1) {
        if (rpcClient == null) {
            rpcClient = connect(agents, retries, connectTimeoutMillis, requestTimeoutMillis);
        }
        if (rpcClient != null) {
            try {
                rpcClient.append(event);
            } catch (final Exception ex) {
                rpcClient.close();
                rpcClient = null;
                final String msg = "Unable to write to " + getName() + " at "
                        + agents[current].getHost() + ':' + agents[current].getPort();
                LOGGER.warn(msg, ex);
                throw new AppenderLoggingException("No Flume agents are available");
            }
        } else {
            final String msg = "Unable to write to " + getName() + " at "
                    + agents[current].getHost() + ':' + agents[current].getPort();
            LOGGER.warn(msg);
            throw new AppenderLoggingException("No Flume agents are available");
        }
    } else {
        batchEvent.addEvent(event);
        final int eventCount = batchEvent.getEvents().size();
        if (eventCount == 1) {
            nextSend = System.nanoTime() + delayNanos;
        }
        if (eventCount >= batchSize || System.nanoTime() >= nextSend) {
            send(batchEvent);
            batchEvent = new BatchEvent();
        }
    }
}
@Override
public void actionPerformed(ActionEvent e) {
    try {
        Util.runAbstractWorker(DbImportAction.this);
    } catch (Throwable throwable) {
        LOGGER.warn("Problem importing from database", throwable);
    }
}
/**
 * Actual writing occurs here.
 *
 * @param logEvent The LogEvent.
 */
@Override
public void append(LogEvent logEvent) {
    if (!isStarted()) {
        throw new IllegalStateException("AsyncAppender " + getName() + " is not active");
    }
    if (!(logEvent instanceof Log4jLogEvent)) {
        if (!(logEvent instanceof RingBufferLogEvent)) {
            return; // only know how to serialize Log4jLogEvents and RingBufferLogEvents
        }
        logEvent = ((RingBufferLogEvent) logEvent).createMemento();
    }
    logEvent.getMessage().getFormattedMessage(); // LOG4J2-763: ask message to freeze parameters
    final Log4jLogEvent coreEvent = (Log4jLogEvent) logEvent;
    boolean appendSuccessful = false;
    if (blocking) {
        if (isAppenderThread.get() == Boolean.TRUE && queue.remainingCapacity() == 0) {
            // LOG4J2-485: avoid deadlock that would result from trying
            // to add to a full queue from appender thread
            coreEvent.setEndOfBatch(false); // queue is definitely not empty!
            appendSuccessful = thread.callAppenders(coreEvent);
        } else {
            final Serializable serialized = Log4jLogEvent.serialize(coreEvent, includeLocation);
            try {
                // wait for free slots in the queue
                queue.put(serialized);
                appendSuccessful = true;
            } catch (final InterruptedException e) {
                // LOG4J2-1049: Some applications use Thread.interrupt() to send
                // messages between application threads. This does not necessarily
                // mean that the queue is full. To prevent dropping a log message,
                // quickly try to offer the event to the queue again.
                // (Yes, this means there is a possibility the same event is logged twice.)
                //
                // Finally, catching the InterruptedException means the
                // interrupted flag has been cleared on the current thread.
                // This may interfere with the application's expectation of
                // being interrupted, so when we are done, we set the interrupted
                // flag again.
                appendSuccessful = queue.offer(serialized);
                if (!appendSuccessful) {
                    LOGGER.warn("Interrupted while waiting for a free slot in the AsyncAppender LogEvent-queue {}",
                            getName());
                }
                // set the interrupted flag again.
                Thread.currentThread().interrupt();
            }
        }
    } else {
        appendSuccessful = queue.offer(Log4jLogEvent.serialize(coreEvent, includeLocation));
        if (!appendSuccessful) {
            error("Appender " + getName() + " is unable to write to primary appenders. Queue is full");
        }
    }
    if (!appendSuccessful && errorAppender != null) {
        errorAppender.callAppender(coreEvent);
    }
}
private void clearTables(EntityManagerFactory emf) throws SQLException {
    long start = System.currentTimeMillis();
    Set<ManagedType<?>> types = emf.getMetamodel().getManagedTypes();
    EntityManager entityManager = makeEntityManager(emf);
    Set<Class<?>> javaTypes = new HashSet<Class<?>>();
    for (ManagedType<?> type : types) {
        javaTypes.add(type.getJavaType());
    }
    int lastsize = javaTypes.size();
    while (!javaTypes.isEmpty()) {
        Iterator<Class<?>> iterator = javaTypes.iterator();
        Collection<Exception> exceptionsDuringClean = new ArrayList<Exception>();
        while (iterator.hasNext()) {
            Class<?> javaType = iterator.next();
            String name = retrieveEntityName(javaType);
            if (name == null) {
                LOGGER.warn("could not determine name for entity {}", javaType);
                iterator.remove();
                continue;
            }
            try {
                entityManager.getTransaction().begin();
                entityManager.createQuery("DELETE FROM " + name).executeUpdate();
                entityManager.getTransaction().commit();
                iterator.remove();
            } catch (Exception e) {
                if (e instanceof PersistenceException
                        || e.getClass().getName()
                                .equals("org.eclipse.persistence.exceptions.DatabaseException") // for eclipse-link < 2.5.0
                ) {
                    exceptionsDuringClean.add(e);
                    LOGGER.debug("error during delete, could be normal", e);
                    entityManager.getTransaction().rollback();
                }
            }
        }
        if (javaTypes.size() == lastsize) {
            entityManager.getTransaction().begin();
            entityManager.createNativeQuery("SHUTDOWN").executeUpdate();
            try {
                entityManager.getTransaction().commit();
            } catch (Exception e) {
                // will always fail because database is shutting down,
                // but we need to clear the transaction-state in the entitymanager
                break;
            }
            LOGGER.error("could not clean database", exceptionsDuringClean.iterator().next());
        }
        lastsize = javaTypes.size();
    }
    entityManager.close();
    LOGGER.info("cleared database in {}ms", System.currentTimeMillis() - start);
}
public boolean applyTimeout(
        User issuingUser, Channel noticeChannel, Server server, User user, Duration duration) {
    String serverId = server.getId();
    if (duration != null && !duration.isNegative() && !duration.isZero()) {
        ServerTimeout timeout = new ServerTimeout(
                duration, Instant.now(), user.getId(), serverId, user.getUsername(), issuingUser.getId());
        TempServerConfig serverConfig = serverStorage.get(serverId);
        if (serverConfig == null) {
            serverConfig = new TempServerConfig(serverId);
            serverStorage.put(serverId, serverConfig);
        }
        ServerTimeoutStorage storage = serverConfig.getServerTimeouts();
        if (storage == null) {
            storage = new ServerTimeoutStorage();
            serverConfig.setServerTimeouts(storage);
        }
        if (applyTimeoutRole(user, server, noticeChannel)) {
            storage.getTimeouts().put(user.getId(), timeout);
            ScheduledFuture<?> future = timeoutService.schedule(
                    () -> onTimeoutExpire(user, server), duration.getSeconds(), TimeUnit.SECONDS);
            timeout.setTimerFuture(future);
            saveServerConfig(serverConfig);
            String durationStr = formatDuration(duration);
            String instantStr = formatInstant(timeout.getEndTime());
            String msg = loc.localize("commands.mod.timeout.response",
                    user.getUsername(), user.getId(), durationStr, instantStr);
            apiClient.sendMessage(msg, noticeChannel);
            LOGGER.info("[{}] '{}': Timing out {} ({}) for {} (until {}), issued by {} ({})",
                    serverId, server.getName(),
                    user.getUsername(), user.getId(),
                    durationStr, instantStr,
                    issuingUser.getUsername(), issuingUser.getId());
        }
        // No else with error - applyTimeoutRole does that for us
        return true;
    } else {
        LOGGER.warn("Invalid duration format");
    }
    return false;
}
public void saveServerConfig(TempServerConfig storage) {
    try {
        Files.createDirectories(serverStorageDir);
    } catch (IOException e) {
        LOGGER.warn("Unable to create server storage directory", e);
        return;
    }
    Path serverStorageFile = serverStorageDir.resolve(storage.getServerId() + ".json");
    try (BufferedWriter writer = Files.newBufferedWriter(serverStorageFile, UTF_8, CREATE, TRUNCATE_EXISTING)) {
        gson.toJson(storage, writer);
        writer.flush();
    } catch (IOException e) {
        LOGGER.warn("Unable to write server storage file for " + storage.getServerId(), e);
        return;
    }
    LOGGER.info("Saved server {}", storage.getServerId());
}