public static void main(String[] args) throws NoSuchProviderException, NoSuchAlgorithmException { System.out.println("Starting rfid-reader2keyboard"); Security.insertProviderAt(new Smartcardio(), 1); TerminalFactory.getInstance("PC/SC", null); System.out.println("The following terminals were detected:"); System.out.println(Read.listTerminals()); System.out.println(); System.out.println( "inventid RFID capturing is currently active. Close this dialog to deactivate."); System.out.println( "The most likely reason you see this is in order to resolve any issue you may have found. Please follow" + " the instructions of inventid support and send these lines to the given email address"); executorService.scheduleAtFixedRate(errorLogger, 10, 30, TimeUnit.SECONDS); executorService.scheduleAtFixedRate(detectorLoop, 10, 15, TimeUnit.SECONDS); Read reader = new Read(); reader.startRunning(); Runtime.getRuntime() .addShutdownHook( new Thread() { public void run() { executorService.shutdownNow(); System.out.println( "inventid RFID capturing is now inactive. You can close this dialog"); } }); }
public SenderPool() { this.saorl = new LinkedList<SenderAddedOrRemovedListener>(); this.statsListeners = new LinkedList<OverallSenderStatisticsUpdatedListener>(); analyzer = new OverallSenderStatisticAnalyzer(); this.stfe = new ScheduledThreadPoolExecutor(threadPoolSize); stfe.scheduleAtFixedRate(analyzer, statsInterval, statsInterval, TimeUnit.MILLISECONDS); }
/** Fetches pending messages to send from the database. */ private void startWatchData() { watchDataThreadPool.scheduleAtFixedRate( new Runnable() { @Override public void run() { // Rows queried from the database and locked for sending List<SmQueue> temp = new ArrayList<SmQueue>(); int limit = smgFlowLimit - queue.size(); if (limit > 0) { DatabaseTransaction trans = new DatabaseTransaction(true); try { temp = new DbService(trans).getMsgFromQueueAndLockMsg(channel.getId(), limit); trans.commit(); } catch (Exception ex) { ChannelLog.log( Logger.getLogger(SubmitThread.class), ex.getMessage(), LevelUtils.getErrLevel(channel.getId())); trans.rollback(); } finally { trans.close(); } if (temp.size() > 0) { queue.addAll(temp); } } } }, 1000, 1000, TimeUnit.MILLISECONDS); }
/** * Implements the Token Bucket algorithm to provide a maximum number of invocations within each * fixed time window. Useful for rate-limiting. If given a non-null executor, the scheduled * runnables are passed to that executor for execution at the rate limit. If executor is null, a * single-threaded executor is used * * @param executor the Executor which executes the Runnables. the executor is not called with the * runnable until the rate limit has been fulfilled * @param invocations number of queries allowed during each time window * @param per the duration of each time window */ public RateLimiter(Executor executor, final int invocations, final Duration per) { if (executor != null) { this.executor = executor; } else { this.executor = Executors.newSingleThreadExecutor(); } // This thread fills the TokenBucket with available requests every time window ScheduledThreadPoolExecutor replenisher = new ScheduledThreadPoolExecutor(1); replenisher.scheduleAtFixedRate( new Runnable() { public void run() { int permitsToCreate = invocations - requestsAvailable.availablePermits(); if (permitsToCreate > 0) { synchronized (requestsAvailable) { // bring the number of requests up to the maximum size per time window requestsAvailable.release(permitsToCreate); } } } }, 0, per.getMillis(), TimeUnit.MILLISECONDS); pump = new RunnablePump(); pump.start(); }
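The constructor above shows only the replenishing half of the token bucket; requestsAvailable is the Semaphore that callers drain, and RunnablePump hands queued Runnables to the executor once permits exist. A minimal, self-contained sketch of the same replenishment idea, with all names here being illustrative rather than this class's real API, could look like this:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

/** Illustrative token bucket: at most maxPerWindow acquisitions per time window. */
class SimpleTokenBucket {
  private final Semaphore tokens;
  private final ScheduledThreadPoolExecutor replenisher = new ScheduledThreadPoolExecutor(1);

  SimpleTokenBucket(final int maxPerWindow, long windowMillis) {
    this.tokens = new Semaphore(maxPerWindow);
    // Top the bucket back up to its maximum once per window, mirroring the replenisher above.
    replenisher.scheduleAtFixedRate(
        new Runnable() {
          public void run() {
            synchronized (tokens) {
              int missing = maxPerWindow - tokens.availablePermits();
              if (missing > 0) {
                tokens.release(missing);
              }
            }
          }
        },
        windowMillis, windowMillis, TimeUnit.MILLISECONDS);
  }

  /** Blocks until a permit is available, then consumes it. */
  void acquire() throws InterruptedException {
    tokens.acquire();
  }

  void shutdown() {
    replenisher.shutdownNow();
  }
}

A caller would invoke acquire() before each rate-limited operation; releasing only the missing permits, rather than a fixed batch, keeps the bucket from growing beyond its per-window maximum, just as the snippet does.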
@Override public void start() { super.start(); setRunning(true); Log.d(TAG, "Starting Location sensor"); // Use any provider (LOCATION, Network or Passive) addLocationListenerWithAllProviders(); IntentFilter intentFilter = new IntentFilter(LOCATION_UPDATE_ACTION); getContext().registerReceiver(locationReceiver, intentFilter); Log.d(TAG, "Starting Location sensor [done]"); Sensor.setSensorStatus(Sensor.SENSOR_LOCATION, Sensor.SENSOR_ON); refreshStatus(); if (stpe == null) { stpe = new ScheduledThreadPoolExecutor(1); stpe.scheduleAtFixedRate(controller, 0, Utilities.LOCATION_CHECK_TIME, TimeUnit.MILLISECONDS); /*stpe.scheduleAtFixedRate(controller, MAX_TIME_WITHOUT_NEW_LOCATION, MAX_TIME_WITHOUT_NEW_LOCATION, TimeUnit.MILLISECONDS);*/ } }
/** * Submit the work for actual execution. * * @throws RuntimeException if the vertex spec cannot be parsed from its binary form */ public void submitWork(SubmitWorkRequestProto request, String llapHost, int llapPort) { // Register the pending events to be sent for this spec. VertexOrBinary vob = request.getWorkSpec(); assert vob.hasVertexBinary() != vob.hasVertex(); SignableVertexSpec vertex = null; try { vertex = vob.hasVertex() ? vob.getVertex() : SignableVertexSpec.parseFrom(vob.getVertexBinary()); } catch (InvalidProtocolBufferException e) { throw new RuntimeException(e); } QueryIdentifierProto queryIdentifierProto = vertex.getQueryIdentifier(); TezTaskAttemptID attemptId = Converters.createTaskAttemptId( queryIdentifierProto, vertex.getVertexIndex(), request.getFragmentNumber(), request.getAttemptNumber()); final String fragmentId = attemptId.toString(); pendingEvents.putIfAbsent( fragmentId, new PendingEventData( new TaskHeartbeatInfo(fragmentId, llapHost, llapPort), Lists.<TezEvent>newArrayList())); // Set up a timer task to check for heartbeat timeouts timer.scheduleAtFixedRate( new HeartbeatCheckTask(), connectionTimeout, connectionTimeout, TimeUnit.MILLISECONDS); // Send out the actual SubmitWorkRequest communicator.sendSubmitWork( request, llapHost, llapPort, new LlapProtocolClientProxy.ExecuteRequestCallback<SubmitWorkResponseProto>() { @Override public void setResponse(SubmitWorkResponseProto response) { if (response.hasSubmissionState()) { if (response.getSubmissionState().equals(SubmissionStateProto.REJECTED)) { String msg = "Fragment: " + fragmentId + " rejected. Server Busy."; LOG.info(msg); if (responder != null) { Throwable err = new RuntimeException(msg); responder.submissionFailed(fragmentId, err); } return; } } } @Override public void indicateError(Throwable t) { String msg = "Failed to submit: " + fragmentId; LOG.error(msg, t); Throwable err = new RuntimeException(msg, t); responder.submissionFailed(fragmentId, err); } }); }
/** * Sends a Spine message. This is a wrapper around the Transmitter thread and registers the message * with the retry mechanism if the contract properties require it. It will also, for asynchronous * messages, start the listener if it is not already running. * * @param s Concrete instance of Sendable, encapsulating the message to send. * @param c SDS details of the recipient * @throws Exception if there was a Connection Manager boot exception, or if starting any required * listener fails. */ public void send(Sendable s, SdsTransmissionDetails c) throws Exception { // Note: check this here so getInstance() doesn't have to throw any // exception - that means that we don't have to catch them in the // Transmitter, which can just log if anything goes wrong with its // own processing. // if (bootException != null) throw bootException; if (!c.isSynchronous()) { listen(); if ((s.getType() != Sendable.ACK) && (c.getDuplicateElimination().contentEquals("always"))) { synchronized (LOGSOURCE) { if (timer == null) { timer = new ScheduledThreadPoolExecutor(TIMER_THREAD_POOL_SIZE); RetryProcessor rp = new RetryProcessor(); timer.scheduleAtFixedRate( rp, retryCheckPeriod, retryCheckPeriod, TimeUnit.MILLISECONDS); } } if (!requests.containsKey(s.getMessageId())) { requests.put(s.getMessageId(), s); } } } Transmitter t = new Transmitter(s); t.start(); }
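The synchronized null-check above is a common way to start a shared retry scheduler lazily, so only the first asynchronous send pays the startup cost. A generic sketch of that pattern, with every name hypothetical, might be:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class RetrySchedulerHolder {
  private static final Object LOCK = new Object();
  private static ScheduledThreadPoolExecutor timer; // guarded by LOCK

  /** Lazily creates the shared scheduler and registers the periodic retry task exactly once. */
  static void ensureStarted(Runnable retryTask, long retryCheckPeriodMillis) {
    synchronized (LOCK) {
      if (timer == null) {
        timer = new ScheduledThreadPoolExecutor(1);
        timer.scheduleAtFixedRate(
            retryTask, retryCheckPeriodMillis, retryCheckPeriodMillis, TimeUnit.MILLISECONDS);
      }
    }
  }
}

Subsequent calls find the scheduler already created and skip straight to registering the message for retry.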
/** Schedules the notification updater task if it hasn't been scheduled yet. */ private void setupNotificationUpdater() { if (AppConfig.DEBUG) Log.d(TAG, "Setting up notification updater"); if (notificationUpdater == null) { notificationUpdater = new NotificationUpdater(); notificationUpdaterFuture = schedExecutor.scheduleAtFixedRate(notificationUpdater, 5L, 5L, TimeUnit.SECONDS); } }
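The returned ScheduledFuture is kept in notificationUpdaterFuture, presumably so the updater can be torn down later; that teardown is not shown here. A plausible counterpart, an assumption rather than code from the source, would cancel the future and clear both fields:

/** Hypothetical counterpart to setupNotificationUpdater(): stops the periodic updater. */
private void cancelNotificationUpdater() {
  if (notificationUpdaterFuture != null) {
    notificationUpdaterFuture.cancel(true); // interrupt the updater if it is mid-run
  }
  notificationUpdater = null;
  notificationUpdaterFuture = null;
}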
public ScheduledFuture<?> scheduleAiAtFixedRate(Runnable r, long initial, long delay) { try { delay = ThreadPoolManager.validateDelay(delay); initial = ThreadPoolManager.validateDelay(initial); return _aiScheduledThreadPool.scheduleAtFixedRate( new RunnableWrapper(r), initial, delay, TimeUnit.MILLISECONDS); } catch (RejectedExecutionException e) { return null; /* shutdown, ignore */ } }
public static void main(String[] args) { // Schedule a task using only Java ThreadExecutor System.out.println("Main Thread started"); MyTask myTask = new MyTask(); int corePoolSize = 5; ScheduledThreadPoolExecutor myExecutor = new ScheduledThreadPoolExecutor(corePoolSize); // myExecutor.scheduleAtFixedRate(myTask, 1000, 2000, TimeUnit.MILLISECONDS); myExecutor.scheduleAtFixedRate(myTask, 0, 1, TimeUnit.DAYS); System.out.println("Main Thread ended"); }
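"Main Thread ended" only means main has returned: the ScheduledThreadPoolExecutor's worker threads are non-daemon by default, so the JVM stays alive and myTask keeps firing once a day until the executor is shut down. A minimal sketch of bounding such a run, with illustrative names and delays, is:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ScheduledTaskDemo {
  public static void main(String[] args) throws InterruptedException {
    ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1);
    executor.scheduleAtFixedRate(
        () -> System.out.println("tick"), 0, 500, TimeUnit.MILLISECONDS);
    Thread.sleep(2000);  // let the task run a few times
    executor.shutdown(); // by default, periodic tasks are not rescheduled after shutdown()
    executor.awaitTermination(1, TimeUnit.SECONDS);
  }
}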
private void initConfigMonitoring(ServletContext context) { //noinspection unchecked Collection<URI> webURIs = (Collection<URI>) context.getAttribute("com.sun.faces.webresources"); if (isDevModeEnabled() && webURIs != null && !webURIs.isEmpty()) { webResourcePool = new ScheduledThreadPoolExecutor(1, new MojarraThreadFactory("WebResourceMonitor")); webResourcePool.scheduleAtFixedRate( new WebConfigResourceMonitor(context, webURIs), 2000, 2000, TimeUnit.MILLISECONDS); } context.removeAttribute("com.sun.faces.webresources"); }
public ScheduledFuture<?> scheduleAiAtFixedRate(Runnable r, long initial, long delay) { try { if (delay < 0) { delay = 0; } if (initial < 0) { initial = 0; } return _aiScheduledThreadPool.scheduleAtFixedRate(r, initial, delay, TimeUnit.MILLISECONDS); } catch (RejectedExecutionException e) { return null; /* shutdown, ignore */ } }
public void initAudio(int size) { if (mAudioTrack != null) return; size /= 8; mAudioData = new byte[size]; int sampleFreq = 44100; int bufferSize = Math.max( size, AudioTrack.getMinBufferSize( sampleFreq, AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT)); mAudioTrack = new QuakeGVRAudioTrack( AudioManager.STREAM_MUSIC, sampleFreq, AudioFormat.CHANNEL_CONFIGURATION_STEREO, AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM); mAudioTrack.play(); long sleeptime = (size * 1000000000l) / (2 * 2 * sampleFreq); stpe = new ScheduledThreadPoolExecutor(5); stpe.scheduleAtFixedRate( new Runnable() { @Override public void run() { if (reqThreadrunning) { GLES3JNILib.requestAudioData(); } } }, 0, sleeptime, TimeUnit.NANOSECONDS); }
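The period passed to scheduleAtFixedRate is the playback time of the requested buffer: size bytes divided by 4 bytes per stereo frame (16-bit PCM, two channels) and by the 44100 Hz sample rate, converted to nanoseconds. As an illustrative figure not taken from the source, if size works out to 16384 bytes after the initial divide-by-8, the period is 16384 / (4 * 44100) s ≈ 92.9 ms, so GLES3JNILib.requestAudioData() is asked for more samples roughly every 93 ms.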
public void afterPropertiesSet() throws Exception { scheduler = new ScheduledThreadPoolExecutor( 1, new NamedThreadFactory("Otter-Statistics-Table"), new ThreadPoolExecutor.CallerRunsPolicy()); if (statUnit > 0) { scheduler.scheduleAtFixedRate( new Runnable() { public void run() { try { flushBehaviorHistory(); } catch (Exception e) { logger.error("flush delay stat failed!", e); } } }, statUnit, statUnit, TimeUnit.MILLISECONDS); } }
private void waitForState(final String state) { final ScheduledThreadPoolExecutor timer = new ScheduledThreadPoolExecutor(1); timer.scheduleAtFixedRate( new Runnable() { @Override public void run() { server = compute.servers().server(server.getId()).get(); if (state.equals(server.getStatus())) { timer.shutdown(); } else { System.out.print("."); } } }, 3, 1, TimeUnit.SECONDS); try { timer.awaitTermination(30, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); // restore the interrupt instead of swallowing it } finally { timer.shutdownNow(); // stop polling even if the desired state was never observed } }
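An alternative to polling awaitTermination is to let the scheduled task signal success explicitly, for example through a CountDownLatch, so the waiting thread wakes as soon as the state is reached and the poller is always shut down. A generic sketch under assumed names (statusSupplier stands in for the compute.servers() lookup above):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

class StatePoller {
  /** Returns true if the expected status was observed within 30 seconds. */
  static boolean waitForState(final String expected, final Supplier<String> statusSupplier)
      throws InterruptedException {
    final CountDownLatch reached = new CountDownLatch(1);
    final ScheduledThreadPoolExecutor timer = new ScheduledThreadPoolExecutor(1);
    timer.scheduleAtFixedRate(
        new Runnable() {
          public void run() {
            if (expected.equals(statusSupplier.get())) {
              reached.countDown(); // wake the waiter as soon as the state is reached
            } else {
              System.out.print(".");
            }
          }
        },
        3, 1, TimeUnit.SECONDS);
    try {
      return reached.await(30, TimeUnit.SECONDS);
    } finally {
      timer.shutdownNow(); // always stop the poller, whether or not the state was seen
    }
  }
}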
@Test @Parameters @SuppressWarnings("unchecked") public final void testDelayed( int minDelay, int maxDelay, float lossChance, float dupChance, int executeInterval, long executeTime, boolean retransmit) throws InterruptedException { /* * Record phase */ final UnreliableQueue<Packet<Long>> aToB = new UnreliableQueue<Packet<Long>>( queueListenerAtoB, minDelay, maxDelay, lossChance, dupChance); final UnreliableQueue<Packet<Long>> bToA = new UnreliableQueue<Packet<Long>>( queueListenerBtoA, minDelay, maxDelay, lossChance, dupChance); ProtocolListener<Long> listenerA = DEBUG ? new DebugProtocolListener<Long>(protocolListenerA, Logger.getConsoleLogger("A")) : protocolListenerA; final TestHost<Long> hostA = new TestHost<Long>( hostListenerA, new LongDataGenerator(), bToA, aToB, new ProtocolConfig<Long>(listenerA), DEBUG ? "A" : null); final List<Long> sentA = new ArrayList<Long>(); final List<Long> lostSentA = new ArrayList<Long>(); final List<Long> dupedSentA = new ArrayList<Long>(); final List<Long> receivedA = new ArrayList<Long>(); final List<Long> ackedA = new ArrayList<Long>(); final List<Long> notAckedA = new ArrayList<Long>(); final List<Long> orderedA = new ArrayList<Long>(); final List<Long> unorderedA = new ArrayList<Long>(); final List<Long> retransmitsA = new ArrayList<Long>(); ProtocolListener<Long> listenerB = DEBUG ? new DebugProtocolListener<Long>(protocolListenerB, Logger.getConsoleLogger("B")) : protocolListenerB; final TestHost<Long> hostB = new TestHost<Long>( hostListenerB, new LongDataGenerator(), aToB, bToA, new ProtocolConfig<Long>(listenerB), DEBUG ? "B" : null); final List<Long> sentB = new ArrayList<Long>(); final List<Long> lostSentB = new ArrayList<Long>(); final List<Long> dupedSentB = new ArrayList<Long>(); final List<Long> receivedB = new ArrayList<Long>(); final List<Long> ackedB = new ArrayList<Long>(); final List<Long> notAckedB = new ArrayList<Long>(); final List<Long> orderedB = new ArrayList<Long>(); final List<Long> unorderedB = new ArrayList<Long>(); final List<Long> retransmitsB = new ArrayList<Long>(); new NonStrictExpectations() { { hostListenerA.notifyReceived(withCapture(receivedA)); hostListenerA.notifySent(withCapture(sentA)); hostListenerA.notifyRetransmitted(withCapture(retransmitsA)); protocolListenerA.handleAckedData(anyShort, withCapture(ackedA)); protocolListenerA.handleUnackedData(anyShort, withCapture(notAckedA)); protocolListenerA.handleOrderedData(anyShort, withCapture(orderedA)); protocolListenerA.handleUnorderedData(anyShort, withCapture(unorderedA)); } }; new NonStrictExpectations() { { hostListenerB.notifyReceived(withCapture(receivedB)); hostListenerB.notifySent(withCapture(sentB)); hostListenerB.notifyRetransmitted(withCapture(retransmitsB)); protocolListenerB.handleAckedData(anyShort, withCapture(ackedB)); protocolListenerB.handleUnackedData(anyShort, withCapture(notAckedB)); protocolListenerB.handleOrderedData(anyShort, withCapture(orderedB)); protocolListenerB.handleUnorderedData(anyShort, withCapture(unorderedB)); } }; new NonStrictExpectations() { { queueListenerAtoB.notifyDuplicate((Packet<Long>) any); result = new Delegate<Packet<Long>>() { @SuppressWarnings("unused") void delegate(Packet<Long> dup) { for (Metadata<Long> metadata : dup.getMetadatas()) { dupedSentA.add(metadata.getData()); if (DEBUG) System.out.println("[A-dupedSent]: " + metadata.getData()); } } }; queueListenerAtoB.notifyLoss((Packet<Long>) any); result = new Delegate<Packet<Long>>() { @SuppressWarnings("unused") void delegate(Packet<Long> loss) { for 
(Metadata<Long> metadata : loss.getMetadatas()) { lostSentA.add(metadata.getData()); if (DEBUG) System.out.println("[A-lostSent]: " + metadata.getData()); } } }; queueListenerBtoA.notifyDuplicate((Packet<Long>) any); result = new Delegate<Packet<Long>>() { @SuppressWarnings("unused") void delegate(Packet<Long> dup) { for (Metadata<Long> metadata : dup.getMetadatas()) { dupedSentB.add(metadata.getData()); if (DEBUG) System.out.println("[B-dupedSent]: " + metadata.getData()); } } }; queueListenerBtoA.notifyLoss((Packet<Long>) any); result = new Delegate<Packet<Long>>() { @SuppressWarnings("unused") void delegate(Packet<Long> loss) { for (Metadata<Long> metadata : loss.getMetadatas()) { lostSentB.add(metadata.getData()); if (DEBUG) System.out.println("[B-lostSent]: " + metadata.getData()); } } }; } }; /* * Replay phase */ // play it for a longer interval executor.scheduleAtFixedRate(hostA, 0, executeInterval, TimeUnit.MILLISECONDS); executor.scheduleAtFixedRate( hostB, executeInterval / 2, executeInterval, TimeUnit.MILLISECONDS); executor.schedule( new Runnable() { @Override public void run() { // enable reliable queue mode for final messages aToB.setDupChance(0f); aToB.setLossChance(0f); aToB.setMinDelay(0L); aToB.setMaxDelay(0L); bToA.setDupChance(0f); bToA.setLossChance(0f); bToA.setMinDelay(0L); bToA.setMaxDelay(0L); } }, executeTime - executeTime / 10, TimeUnit.SECONDS); executor.awaitTermination(executeTime, TimeUnit.SECONDS); executor.shutdown(); executor.awaitTermination(executeInterval * 2 + maxDelay * 2, TimeUnit.MILLISECONDS); // let pending messages finish Thread.sleep(maxDelay * 2); hostA.receive(); hostB.receive(); hostA.send(); hostB.send(); Thread.sleep(maxDelay * 2); // wait for queue to make all elements available hostA.receive(); hostB.receive(); System.out.println(); /* * Verify phase */ for (Long item : notAckedA) assertTrue("notAcked data should not have been acked", !ackedA.contains(item)); for (Long item : notAckedB) assertTrue("notAcked data should not have been acked", !ackedB.contains(item)); for (Long item : retransmitsA) assertTrue("retransmitted data should have been sent from sender", sentA.contains(item)); for (Long item : retransmitsB) assertTrue("retransmitted data should have been sent from sender", sentB.contains(item)); for (Long item : lostSentA) assertTrue("over medium lost data should have been sent from sender", sentA.contains(item)); for (Long item : lostSentB) assertTrue("over medium lost data should have been sent from sender", sentB.contains(item)); for (Long item : dupedSentA) assertTrue( "over medium duplicated data should have been sent from sender", sentA.contains(item)); for (Long item : dupedSentB) assertTrue( "over medium duplicated data should have been sent from sender", sentB.contains(item)); Long lastItem = null; for (Long item : orderedA) { assertTrue("orderly received data should have been sent from sender", sentB.contains(item)); if (lastItem != null) { assertTrue("ordered data should be ordered", item > lastItem); } lastItem = item; } lastItem = null; for (Long item : orderedB) { assertTrue("orderly received data should have been sent from sender", sentA.contains(item)); if (lastItem != null) { assertTrue("ordered data should be ordered", item > lastItem); } lastItem = item; } lastItem = null; for (Long item : unorderedA) { assertTrue("unorderly received data should have been sent from sender", sentB.contains(item)); if (lastItem != null) { assertTrue("unordered data should be ordered", item > lastItem); } lastItem = item; 
assertTrue("unordered data should not have been orderly received", !orderedA.contains(item)); // The following assertions can not be guaranteed, since there may be multiple unordered // events and multiple holes until an ordered event occurs // Long pred = item; // do { // pred--; // } while(unorderedA.contains(pred)); // Long succ = item; // do { // succ++; // } while(unorderedA.contains(succ)); // assertTrue("ordered data contains predecessor of unorderedData", // orderedA.contains(pred)); // assertTrue("ordered data contains successor of unorderedData", orderedA.contains(succ)); } lastItem = null; for (Long item : unorderedB) { assertTrue("orderly received data should have been sent from sender", sentA.contains(item)); if (lastItem != null) { assertTrue("unordered data should be ordered", item > lastItem); } lastItem = item; assertTrue("unordered data should not have been orderly received", !orderedB.contains(item)); // The following assertions can not be guaranteed, since there may be multiple unordered // events and multiple holes until an ordered event occurs // Long pred = item; // do { // pred--; // } while(unorderedB.contains(pred)); // Long succ = item; // do { // succ++; // } while(unorderedB.contains(succ)); // assertTrue("ordered data contains predecessor of unorderedData", // orderedB.contains(pred)); // assertTrue("ordered data contains successor of unorderedData", orderedB.contains(succ)); } // the following addition of "magic constants" is due to the scheduling procedure of the very // last messages assertEquals( "all messages from A must be received at B", receivedB.size(), sentA.size() - lostSentA.size() + dupedSentA.size()); assertEquals( "all messages from A must be acked", ackedA.size(), sentA.size() - retransmitsA.size() - notAckedA.size() - 1); assertEquals( "all messages from A must be ordered at B", orderedB.size(), sentA.size() - retransmitsA.size() - unorderedB.size()); // the following addition of "magic constants" is due to the scheduling procedure of the very // last messages assertEquals( "all messages from B must be received at A", receivedA.size(), sentB.size() - lostSentB.size() + dupedSentB.size()); assertEquals( "all messages from B must be acked", ackedB.size(), sentB.size() - retransmitsB.size() - notAckedB.size() - 1); assertEquals( "all messages from B must be ordered at A", orderedA.size(), sentB.size() - retransmitsB.size() - unorderedA.size()); if (lossChance == 0f) { assertEquals("no lost packets", 0, lostSentB.size()); assertEquals("no lost packets", 0, lostSentA.size()); } if (dupChance == 0f) { assertEquals("no duped packets", 0, dupedSentB.size()); assertEquals("no duped packets", 0, dupedSentA.size()); } if (retransmit || lossChance == 0f) { new Verifications() { { protocolListenerA.handleUnackedData(anyShort, anyLong); times = 0; protocolListenerA.handleUnorderedData(anyShort, anyLong); times = 0; } }; new Verifications() { { protocolListenerB.handleUnackedData(anyShort, anyLong); times = 0; protocolListenerB.handleUnorderedData(anyShort, anyLong); times = 0; } }; assertEquals("all packets acked", 0, notAckedA.size()); assertEquals("all packets ordered", 0, unorderedA.size()); assertEquals("all packets acked", 0, notAckedB.size()); assertEquals("all packets ordered", 0, unorderedB.size()); } }
public void repeat(Runnable event, long initialDelay, long period) { scheduler.scheduleAtFixedRate(event, initialDelay, period, TimeUnit.MILLISECONDS); }
@Override public ScheduledFuture<?> scheduleAtFixedRate( Runnable command, long initialDelay, long period, TimeUnit unit) { return m_executor.scheduleAtFixedRate(command, initialDelay, period, unit); }
/** {@inheritDoc} */ @Override public LDIFImportResult importLDIF( LDIFImportConfig importConfig, RootContainer rootContainer, ServerContext serverContext) throws DirectoryException { try { ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor(1); try { final LDIFReader reader; try { reader = new LDIFReader(importConfig); } catch (Exception e) { LocalizableMessage m = ERR_LDIF_BACKEND_CANNOT_CREATE_LDIF_READER.get(stackTraceToSingleLineString(e)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), m, e); } long importCount = 0; final long startTime = System.currentTimeMillis(); timerService.scheduleAtFixedRate( new ImportProgress(reader), IMPORT_PROGRESS_INTERVAL, IMPORT_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS); while (true) { final Entry entry; try { entry = reader.readEntry(); if (entry == null) { break; } } catch (LDIFException le) { if (!le.canContinueReading()) { LocalizableMessage m = ERR_LDIF_BACKEND_ERROR_READING_LDIF.get(stackTraceToSingleLineString(le)); throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), m, le); } continue; } final DN dn = entry.getName(); final EntryContainer ec = rootContainer.getEntryContainer(dn); if (ec == null) { final LocalizableMessage m = ERR_LDIF_SKIP.get(dn); logger.error(m); reader.rejectLastEntry(m); continue; } try { ec.addEntry(entry, null); importCount++; } catch (DirectoryException e) { switch (e.getResultCode().asEnum()) { case ENTRY_ALREADY_EXISTS: if (importConfig.replaceExistingEntries()) { final Entry oldEntry = ec.getEntry(entry.getName()); ec.replaceEntry(oldEntry, entry, null); } else { reader.rejectLastEntry(WARN_IMPORT_ENTRY_EXISTS.get()); } break; case NO_SUCH_OBJECT: reader.rejectLastEntry(ERR_IMPORT_PARENT_NOT_FOUND.get(dn.parent())); break; default: // Not sure why it failed. reader.rejectLastEntry(e.getMessageObject()); break; } } } final long finishTime = System.currentTimeMillis(); waitForShutdown(timerService); final long importTime = finishTime - startTime; float rate = 0; if (importTime > 0) { rate = 1000f * reader.getEntriesRead() / importTime; } logger.info( NOTE_IMPORT_FINAL_STATUS, reader.getEntriesRead(), importCount, reader.getEntriesIgnored(), reader.getEntriesRejected(), 0, importTime / 1000, rate); return new LDIFImportResult( reader.getEntriesRead(), reader.getEntriesRejected(), reader.getEntriesIgnored()); } finally { rootContainer.close(); // if not already stopped, then stop it waitForShutdown(timerService); } } catch (DirectoryException e) { logger.traceException(e); throw e; } catch (OpenDsException e) { logger.traceException(e); throw new DirectoryException(getServerErrorResultCode(), e.getMessageObject()); } catch (Exception e) { logger.traceException(e); throw new DirectoryException( getServerErrorResultCode(), LocalizableMessage.raw(e.getMessage())); } }
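waitForShutdown(timerService) is called twice above but not shown. One plausible shape for such a helper, offered purely as an assumption about its intent (stop the ImportProgress timer and give it a bounded time to finish), is:

import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/** Hypothetical helper: stop the progress timer and wait briefly for it to drain. */
private static void waitForShutdown(ScheduledThreadPoolExecutor timerService) {
  timerService.shutdown(); // the periodic ImportProgress task is not rescheduled after this
  try {
    timerService.awaitTermination(10, TimeUnit.SECONDS);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
  }
}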
@Override public JobHandle scheduleRecurring( Group group, final Runnable runnable, long initialDelay, long period, TimeUnit timeUnit) { return new Handle( scheduledExecutor.scheduleAtFixedRate(runnable, initialDelay, period, timeUnit)); }
public void init() throws SipCacheException { executor = new ScheduledThreadPoolExecutor(1); final String configurationPath = configProperties.getProperty(INFINISPAN_CACHE_CONFIG_PATH, DEFAULT_FILE_CONFIG_PATH); if (configProperties.containsKey(INFINISPAN_CACHEMANAGER_JNDI_NAME)) { if (clusteredlogger.isLoggingEnabled(LogLevels.TRACE_INFO)) { clusteredlogger.logInfo( INFINISPAN_CACHEMANAGER_JNDI_NAME + " specified, trying to load Infinispan CacheManager from JNDI " + configProperties.getProperty(INFINISPAN_CACHEMANAGER_JNDI_NAME)); } executor.scheduleAtFixedRate( new Runnable() { static final int MAX_ATTEMPTS = 30; int attempts = 0; public void run() { attempts++; // Init Infinispan CacheManager if (configProperties.containsKey(INFINISPAN_CACHEMANAGER_JNDI_NAME)) { try { InitialContext context = new InitialContext(); String cacheManagerJndiName = configProperties.getProperty(INFINISPAN_CACHEMANAGER_JNDI_NAME); cm = (CacheContainer) context.lookup(cacheManagerJndiName); if (clusteredlogger.isLoggingEnabled(LogLevels.TRACE_INFO)) { clusteredlogger.logInfo( "Found Infinispan CacheManager: cacheManagerJndiName \"" + cacheManagerJndiName + "\" " + cm + " after attempts " + attempts); } executor.remove(this); executor.shutdown(); } catch (NamingException e) { // Infinispan CacheManager JNDI lookup failed: could not get InitialContext or // lookup failed if (attempts > MAX_ATTEMPTS) { clusteredlogger.logError( "Infinispan CacheManager JNDI lookup failed: could not get InitialContext or lookup failed after attempts " + attempts + ", stopping there", e); executor.remove(this); executor.shutdown(); } else { if (clusteredlogger.isLoggingEnabled(LogLevels.TRACE_INFO)) { clusteredlogger.logInfo( "Infinispan CacheManager JNDI lookup failed: could not get InitialContext or lookup failed after attempts " + attempts + ", retrying every second"); } } return; } } setupCacheStructures(); if (dialogCacheData != null) { dialogCacheData.setDialogs(dialogs); dialogCacheData.setAppDataMap(appDataMap); } if (serverTXCacheData != null) { serverTXCacheData.setServerTransactions(serverTransactions); serverTXCacheData.setServerTransactionsApp(serverTransactionsApp); } if (clientTXCacheData != null) { clientTXCacheData.setClientTransactions(clientTransactions); clientTXCacheData.setClientTransactionsApp(clientTransactionsApp); } } }, 0, 1, TimeUnit.SECONDS); } else { if (clusteredlogger.isLoggingEnabled(LogLevels.TRACE_INFO)) { clusteredlogger.logInfo( INFINISPAN_CACHEMANAGER_JNDI_NAME + " not specified, trying to load Infinispan CacheManager from configuration file " + configurationPath); } try { if (cm == null) { cm = CacheManagerHolder.getManager(configurationPath); if (clusteredlogger.isLoggingEnabled(LogLevels.TRACE_INFO)) { clusteredlogger.logInfo( "Found Infinispan CacheManager: configuration file from path \"" + configurationPath + "\" " + cm); } } setupCacheStructures(); } catch (IOException e) { clusteredlogger.logError( "Failed to init Infinispan CacheManager: could not read configuration file from path \"" + configurationPath + "\"", e); } if (dialogCacheData != null) { dialogCacheData.setDialogs(dialogs); dialogCacheData.setAppDataMap(appDataMap); } if (serverTXCacheData != null) { serverTXCacheData.setServerTransactions(serverTransactions); serverTXCacheData.setServerTransactionsApp(serverTransactionsApp); } if (clientTXCacheData != null) { clientTXCacheData.setClientTransactions(clientTransactions); clientTXCacheData.setClientTransactionsApp(clientTransactionsApp); } } }