public static void perpetualServer() {
  Configuration conf = new Configuration();
  Runnable runnable =
      new Runnable() {
        @Override
        public void run() {
          Iterator<String> itrModule = conf.getProperties("module").iterator();
          Iterator<String> itrMethod = conf.getProperties("method").iterator();
          String tmpMethod = itrMethod.next();
          while (itrModule.hasNext()) {
            try {
              // Instantiate each configured module and invoke the configured method on it.
              Object object = Class.forName(itrModule.next()).newInstance();
              Method method = object.getClass().getDeclaredMethod(tmpMethod, new Class<?>[0]);
              method.invoke(object);
            } catch (Exception e) {
              System.out.println("Most likely no module is activated; stack trace follows.");
              e.printStackTrace();
            }
          }
        }
      };
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  executor.scheduleAtFixedRate(runnable, 1, 720, TimeUnit.MINUTES);
}
@Override
public void run() throws HiveException, ExampleException, IOException {
  try {
    hiveClient.authenticate(LOGIN, PASSWORD);
    // Print every notification the server pushes to this client.
    HiveMessageHandler<DeviceNotification> notificationsHandler =
        new HiveMessageHandler<DeviceNotification>() {
          @Override
          public void handle(DeviceNotification notification) {
            print("Notification received: " + notification);
          }
        };
    Timestamp serverTimestamp = hiveClient.getInfo().getServerTimestamp();
    SubscriptionFilter notificationSubscriptionFilter =
        new SubscriptionFilter(null, null, serverTimestamp);
    hiveClient
        .getNotificationsController()
        .subscribeForNotifications(notificationSubscriptionFilter, notificationsHandler);
    // Send a command every 3 seconds, let the example run for about 30 seconds, then stop.
    ScheduledExecutorService commandsExecutor = Executors.newSingleThreadScheduledExecutor();
    commandsExecutor.scheduleAtFixedRate(new CommandTask(), 3, 3, TimeUnit.SECONDS);
    Thread.currentThread().join(TimeUnit.SECONDS.toMillis(30));
    commandsExecutor.shutdownNow();
  } catch (InterruptedException e) {
    throw new ExampleException(e.getMessage(), e);
  } finally {
    hiveClient.close();
  }
}
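// A minimal, self-contained sketch (not part of the DeviceHive sample above) of the same
// pattern: run a periodic task for a bounded time and then shut the scheduler down.
// The class name and the printed message are illustrative assumptions.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class BoundedPeriodicTaskSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    // Run a task every 3 seconds, starting after an initial 3-second delay.
    executor.scheduleAtFixedRate(
        () -> System.out.println("periodic command"), 3, 3, TimeUnit.SECONDS);
    // Let the task run for roughly 30 seconds, then stop scheduling further runs.
    TimeUnit.SECONDS.sleep(30);
    executor.shutdownNow();
    // Wait briefly for an in-flight run, if any, to finish.
    executor.awaitTermination(5, TimeUnit.SECONDS);
  }
}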
@Override
public void init() {
  super.init();
  eventBus = new EventBus(this);
  eventBus.addRegistrationListener(
      new ResourceRegistrationListener() {
        @Override
        public void resourceUnregistered(String uuid) {
          System.out.println("Unregistered " + uuid);
        }

        @Override
        public void resourceRegistered(String uuid, Page page) {
          System.out.println("Registered " + uuid);
        }
      });
  ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
  final Runnable beeper =
      new Runnable() {
        @Override
        public void run() {
          try {
            eventBus.post(new Date());
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      };
  scheduler.scheduleWithFixedDelay(beeper, 2, 2, TimeUnit.SECONDS);
}
@Override
public void contextInitialized(ServletContextEvent arg0) {
  smsCreatorScheduler = Executors.newSingleThreadScheduledExecutor();
  smsCreatorScheduler.scheduleWithFixedDelay(
      new SmsCreatorThread(),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_SMS_CREATORL_INIT_DELAY)),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_SMS_CREATOR_PROC_DELAY)),
      TimeUnit.SECONDS);
  schedulerLogging.info("smsCreatorScheduler : startup context");

  smsSenderScheduler = Executors.newSingleThreadScheduledExecutor();
  smsSenderScheduler.scheduleWithFixedDelay(
      new SmsSenderThread(),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_SMS_SENDER_INIT_DELAY)),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_SMS_SENDER_PROC_DELAY)),
      TimeUnit.MINUTES);
  schedulerLogging.info("smsSenderScheduler : startup context");

  mailScheduler = Executors.newSingleThreadScheduledExecutor();
  mailScheduler.scheduleWithFixedDelay(
      new SingleMailmanThread(),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_STANNDARD_MAIL_INIT_DELAY)),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_STANNDARD_MAIL_PROC_DELAY)),
      TimeUnit.SECONDS);
  schedulerLogging.info("mailScheduler : startup context");

  mailCreator = Executors.newSingleThreadScheduledExecutor();
  mailCreator.scheduleWithFixedDelay(
      new EmailCreatorThread(),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_EMAIL_CREATORL_INIT_DELAY)),
      ParseUtil.sToL(processSchedulerConfig.get(Constants.PROP_EMAIL_CREATOR_PROC_DELAY)),
      TimeUnit.SECONDS);
  schedulerLogging.info("mailCreator : startup context");
}
@Test
public void shouldNotExhaustThreads() throws Exception {
  final ScheduledExecutorService executorService =
      Executors.newScheduledThreadPool(2, testingThreadFactory);
  final GremlinExecutor gremlinExecutor =
      GremlinExecutor.build()
          .executorService(executorService)
          .scheduledExecutorService(executorService)
          .create();

  final AtomicInteger count = new AtomicInteger(0);
  assertTrue(
      IntStream.range(0, 1000)
          .mapToObj(i -> gremlinExecutor.eval("1+1"))
          .allMatch(
              f -> {
                try {
                  return (Integer) f.get() == 2;
                } catch (Exception ex) {
                  throw new RuntimeException(ex);
                } finally {
                  count.incrementAndGet();
                }
              }));
  assertEquals(1000, count.intValue());

  executorService.shutdown();
  executorService.awaitTermination(30000, TimeUnit.MILLISECONDS);
}
// Start moving the BubbleView & updating the display
private void start() {
  // Creates a WorkerThread
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  // Execute the run() in Worker Thread every REFRESH_RATE milliseconds
  // Save reference to this job in mMoverFuture
  mMoverFuture =
      executor.scheduleWithFixedDelay(
          new Runnable() {
            @Override
            public void run() {
              // Each time this method is run the BubbleView should
              // move one step. If the BubbleView exits the display,
              // stop the BubbleView's Worker Thread.
              // Otherwise, request that the BubbleView be redrawn.
              boolean exits = moveWhileOnScreen();
              if (exits) {
                stop(false);
              } else {
                BubbleView.this.postInvalidate();
              }
            }
          },
          0,
          REFRESH_RATE,
          TimeUnit.MILLISECONDS);
}
{
  ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
  updateDetectorFuture =
      executor.scheduleWithFixedDelay(
          new StatusUpdateDetector(), 5, POLL_PERIOD, TimeUnit.SECONDS);
}
private void createTimeoutTasks(final Thread t) {
  scenarioTimeoutInterrupt =
      timeoutExcecutor.schedule(
          new Runnable() {
            public void run() {
              timeoutIfStillRunning(t);
            }
          },
          scenarioTimeoutMillis,
          TimeUnit.MILLISECONDS);
  scenarioTimeoutStopThread =
      timeoutExcecutor.schedule(
          new Runnable() {
            public void run() {
              stopThreadIfStillRunning(t);
            }
          },
          scenarioTimeoutMillis * 2,
          TimeUnit.MILLISECONDS);
  scenarioTimeoutKill =
      timeoutExcecutor.schedule(
          new Runnable() {
            public void run() {
              killInterpreterIfStillRunning(t);
            }
          },
          scenarioTimeoutMillis * 3,
          TimeUnit.MILLISECONDS);
}
/**
 * Adjusts the adaptive poll frequency.
 *
 * @param pollInterval poll frequency in seconds
 */
synchronized void adjustCalAndPollInterval(int pollInterval) {
  initMemberVars(pollInterval);

  // Cancel the currently scheduled collection tasks before rescheduling them
  // with the new intervals.
  if (calAndShortFlowsThread != null) {
    calAndShortFlowsThread.cancel(false);
  }
  if (midFlowsThread != null) {
    midFlowsThread.cancel(false);
  }
  if (longFlowsThread != null) {
    longFlowsThread.cancel(false);
  }

  calAndShortFlowsTask = new CalAndShortFlowsTask();
  calAndShortFlowsThread =
      adaptiveFlowStatsScheduler.scheduleWithFixedDelay(
          calAndShortFlowsTask, 0, calAndPollInterval, TimeUnit.SECONDS);

  midFlowsTask = new MidFlowsTask();
  midFlowsThread =
      adaptiveFlowStatsScheduler.scheduleWithFixedDelay(
          midFlowsTask, 0, midPollInterval, TimeUnit.SECONDS);

  longFlowsTask = new LongFlowsTask();
  longFlowsThread =
      adaptiveFlowStatsScheduler.scheduleWithFixedDelay(
          longFlowsTask, 0, longPollInterval, TimeUnit.SECONDS);

  log.debug("calAndPollInterval=" + calAndPollInterval + " is adjusted");
}
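// A generic sketch of the cancel-and-reschedule pattern used above to change a poll
// interval at runtime. The class, field, and method names here are illustrative
// assumptions, not the implementation the snippet above belongs to.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

class AdjustablePoller {
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();
  private ScheduledFuture<?> pollFuture;

  synchronized void setPollInterval(long intervalSeconds) {
    // Cancel the currently scheduled task without interrupting a run in progress.
    if (pollFuture != null) {
      pollFuture.cancel(false);
    }
    // Reschedule so the new delay applies between the end of one run and the start of the next.
    pollFuture =
        scheduler.scheduleWithFixedDelay(this::poll, 0, intervalSeconds, TimeUnit.SECONDS);
  }

  private void poll() {
    // Collect statistics here.
  }
}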
/**
 * Submits or schedules an {@link ApplicationTask} on the shared executor service.
 *
 * @param task The task
 * @param delay Initial delay in milliseconds, or zero for ASAP
 * @param repeatEvery Repeat delay in milliseconds, or zero for no repetition
 * @param fixedRepeat Whether repetitions are at fixed times, or if the repeat delay begins when
 *     the task ends
 * @return A future for the task
 */
private <T> Future<T> task(
    ApplicationTask<T> task, int delay, int repeatEvery, boolean fixedRepeat) {
  ExecutorService executor = getExecutor();
  if ((delay > 0) || (repeatEvery > 0)) {
    if (!(executor instanceof ScheduledExecutorService))
      throw new RuntimeException(
          "Executor must implement the ScheduledExecutorService interface to allow for delayed tasks");
    ScheduledExecutorService scheduledExecutor = (ScheduledExecutorService) executor;
    if (repeatEvery > 0) {
      if (fixedRepeat) {
        @SuppressWarnings("unchecked")
        ScheduledFuture<T> future =
            (ScheduledFuture<T>)
                scheduledExecutor.scheduleAtFixedRate(
                    task, delay, repeatEvery, TimeUnit.MILLISECONDS);
        return future;
      } else {
        @SuppressWarnings("unchecked")
        ScheduledFuture<T> future =
            (ScheduledFuture<T>)
                scheduledExecutor.scheduleWithFixedDelay(
                    task, delay, repeatEvery, TimeUnit.MILLISECONDS);
        return future;
      }
    } else return scheduledExecutor.schedule((Callable<T>) task, delay, TimeUnit.MILLISECONDS);
  } else return executor.submit((Callable<T>) task);
}
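// A small standalone sketch (independent of the helper above) of the difference its
// fixedRepeat flag selects between: scheduleAtFixedRate keeps a fixed period between
// start times, while scheduleWithFixedDelay waits the full delay after each run finishes.
// The class name and timings are illustrative assumptions.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class RepeatModeSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(2);
    Runnable slowTask =
        () -> {
          System.out.println("run at " + System.currentTimeMillis());
          try {
            Thread.sleep(300); // the task itself takes about 300 ms
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        };
    // Starts roughly every 500 ms regardless of how long the task takes.
    scheduler.scheduleAtFixedRate(slowTask, 0, 500, TimeUnit.MILLISECONDS);
    // Waits 500 ms after each run finishes, so the effective period is about 800 ms.
    scheduler.scheduleWithFixedDelay(slowTask, 0, 500, TimeUnit.MILLISECONDS);
    TimeUnit.SECONDS.sleep(3);
    scheduler.shutdownNow();
  }
}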
/** Stop the decommission monitor thread, waiting briefly for it to terminate. */
void close() {
  executor.shutdownNow();
  try {
    executor.awaitTermination(3000, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    // Ignore; we are shutting down anyway.
  }
}
public static synchronized void clearThreadPools() {
  if (globalThreadPool != null) {
    globalThreadPool.shutdown();
    try {
      if (!globalThreadPool.awaitTermination(10, TimeUnit.SECONDS)) {
        throw new IllegalStateException("Couldn't finish the globalThreadPool");
      }
    } catch (InterruptedException e) {
      // Ignore the interrupt and continue clearing the pools.
    } finally {
      globalThreadPool = null;
    }
  }

  if (globalScheduledThreadPool != null) {
    globalScheduledThreadPool.shutdown();
    try {
      if (!globalScheduledThreadPool.awaitTermination(10, TimeUnit.SECONDS)) {
        throw new IllegalStateException("Couldn't finish the globalScheduledThreadPool");
      }
    } catch (InterruptedException e) {
      // Ignore the interrupt and continue clearing the pools.
    } finally {
      globalScheduledThreadPool = null;
    }
  }
}
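// For comparison with the shutdown code above, a minimal sketch of the two-phase shutdown
// idiom documented in the ExecutorService javadoc; the class and method names are
// illustrative assumptions.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

final class PoolShutdown {
  static void shutdownAndAwait(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks
    try {
      // Give running tasks a bounded amount of time to finish.
      if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
        pool.shutdownNow(); // cancel currently executing tasks
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
          System.err.println("Pool did not terminate");
        }
      }
    } catch (InterruptedException e) {
      pool.shutdownNow();
      Thread.currentThread().interrupt(); // preserve the interrupt status
    }
  }
}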
public static void main(String[] args) throws Exception {
  BasicConfigurator.configure();

  String username = "******";
  final String TWITTER_OAUTH_KEY = "";
  final String TWITTER_OAUTH_SECRET = "";
  final String ACCESS_TOKEN = "";
  final String ACCESS_TOKEN_SECRET = "";
  OAuthSignpostClient oauth =
      new OAuthSignpostClient(
          TWITTER_OAUTH_KEY, TWITTER_OAUTH_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET);
  TwitterClient twitter = new TwitterClient(username, oauth);

  Connection conn =
      DriverManager.getConnection("jdbc:postgresql://juergenbickert.de:5432/smsd?user=smsd");
  conn.setAutoCommit(false);
  conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
  SmsProvider smsProvider = new SmsProvider(conn);

  TwitterTask task = new TwitterTask(twitter, smsProvider);
  task.setAppend(" #tmn12");

  ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
  pool.scheduleWithFixedDelay(task, 0, 1, TimeUnit.SECONDS);
}
/**
 * This method performs the following steps:
 *
 * <ul>
 *   <li>Launch each action after its delay.
 *   <li>After the longest delay, move into the next state.
 *   <li>Return the next state.
 * </ul>
 *
 * @see com.yerbamateprimer.round.state.exit.StateExit#apply()
 */
@Override
public State apply() {
  for (Entry<Long, Collection<SystemAction>> delayedActions : getGetOutActions().entrySet()) {
    Long delay = delayedActions.getKey();
    final Collection<SystemAction> actions = delayedActions.getValue();
    LOGGER.debug("The actions {} will be executed after {} millis, from now.", actions, delay);
    Runnable executeActions =
        new Runnable() {
          public void run() {
            for (SystemAction action : actions) {
              action.execute();
            }
          }
        };
    EXECUTOR.schedule(executeActions, delay, TimeUnit.MILLISECONDS);
  }

  Long getInDelay = getGetOutActions().keySet().iterator().next();
  Runnable executeGetIn =
      new Runnable() {
        public void run() {
          getNextState().getIn();
        }
      };
  EXECUTOR.schedule(executeGetIn, getInDelay, TimeUnit.MILLISECONDS);
  return getNextState();
}
/**
 * Tests that a partition with expired connections has those connections killed off.
 *
 * @throws SQLException
 */
@Test
@SuppressWarnings({"unchecked", "rawtypes"})
public void testConnectionExpired() throws SQLException {
  TransferQueue<ConnectionHandle> mockQueue = createNiceMock(TransferQueue.class);
  expect(mockConnectionPartition.getAvailableConnections()).andReturn(1);
  expect(mockConnectionPartition.getFreeConnections()).andReturn(mockQueue).anyTimes();
  ConnectionHandle mockConnectionExpired = createNiceMock(ConnectionHandle.class);
  ConnectionHandle mockConnection = createNiceMock(ConnectionHandle.class);
  expect(mockQueue.poll()).andReturn(mockConnectionExpired).once();
  expect(mockConnectionExpired.isExpired(anyLong())).andReturn(true).once();
  expect(mockExecutor.isShutdown()).andReturn(false).once();
  mockConnectionExpired.internalClose();
  expectLastCall().once();
  mockPool.postDestroyConnection(mockConnectionExpired);
  expectLastCall().once();
  expect(mockExecutor.schedule((Callable) anyObject(), anyLong(), (TimeUnit) anyObject()))
      .andReturn(null)
      .once();
  replay(
      mockQueue,
      mockExecutor,
      mockConnectionPartition,
      mockConnection,
      mockPool,
      mockConnectionExpired);
  testClass.run();
  verify(mockConnectionExpired);
}
/** Start adaptive flow statistics collection. */
public synchronized void start() {
  log.debug("Starting AdaptiveStats collection thread for {}", sw.getStringId());
  callCountCalAndShortFlowsTask = 0;
  callCountMidFlowsTask = 0;
  callCountLongFlowsTask = 0;
  isFirstTimeStart = true;

  // Initially start polling quickly. Then drop down to configured value.
  calAndShortFlowsTask = new CalAndShortFlowsTask();
  calAndShortFlowsThread =
      adaptiveFlowStatsScheduler.scheduleWithFixedDelay(
          calAndShortFlowsTask, 1, calAndPollInterval, TimeUnit.SECONDS);

  midFlowsTask = new MidFlowsTask();
  midFlowsThread =
      adaptiveFlowStatsScheduler.scheduleWithFixedDelay(
          midFlowsTask, 1, midPollInterval, TimeUnit.SECONDS);

  longFlowsTask = new LongFlowsTask();
  longFlowsThread =
      adaptiveFlowStatsScheduler.scheduleWithFixedDelay(
          longFlowsTask, 1, longPollInterval, TimeUnit.SECONDS);

  log.info("Started");
}
public static void main(String[] args) {
  ScheduledExecutorService service = Executors.newScheduledThreadPool(2);
  // Print "haha" every second, starting immediately.
  service.scheduleAtFixedRate(
      new Runnable() {
        @Override
        public void run() {
          System.out.println("haha");
        }
      },
      0,
      1,
      TimeUnit.SECONDS);
  // Print "!!!!!!!!!!" once, after a 10-second delay.
  service.schedule(
      new Runnable() {
        @Override
        public void run() {
          System.out.println("!!!!!!!!!!");
        }
      },
      10,
      TimeUnit.SECONDS);
}
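// A hedged sketch, separate from the demo above, of building a scheduler whose worker
// threads are daemons, so a small demo can exit without an explicit shutdown() call.
// The class name, thread name, and timings are illustrative assumptions.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DaemonSchedulerSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService service =
        Executors.newScheduledThreadPool(
            1,
            runnable -> {
              Thread thread = new Thread(runnable, "daemon-scheduler");
              thread.setDaemon(true); // daemon threads do not keep the JVM alive
              return thread;
            });
    service.scheduleAtFixedRate(() -> System.out.println("tick"), 0, 1, TimeUnit.SECONDS);
    TimeUnit.SECONDS.sleep(5);
    // No shutdown needed here: once main returns, the daemon worker does not block JVM exit.
  }
}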
@Test(groups = "short") public void should_throttle_requests() { // Throttle to a very low value. Even a single thread can generate a higher throughput. final int maxRequests = 10; cluster .getConfiguration() .getPoolingOptions() .setMaxRequestsPerConnection(HostDistance.LOCAL, maxRequests); // Track in flight requests in a dedicated thread every second final AtomicBoolean excessInflightQueriesSpotted = new AtomicBoolean(false); final Host host = cluster.getMetadata().getHost(new InetSocketAddress(CCMBridge.IP_PREFIX + "1", 9042)); ScheduledExecutorService openConnectionsWatcherExecutor = Executors.newScheduledThreadPool(1); final Runnable openConnectionsWatcher = new Runnable() { @Override public void run() { int inFlight = session.getState().getInFlightQueries(host); if (inFlight > maxRequests) excessInflightQueriesSpotted.set(true); } }; openConnectionsWatcherExecutor.scheduleAtFixedRate( openConnectionsWatcher, 200, 200, TimeUnit.MILLISECONDS); // Generate the load for (int i = 0; i < 10000; i++) session.executeAsync("SELECT release_version FROM system.local"); openConnectionsWatcherExecutor.shutdownNow(); if (excessInflightQueriesSpotted.get()) { fail("Inflight queries exceeded the limit"); } }
private StatsCollector(Map<String, String> configs) {
  ComponentLocator locator = ComponentLocator.getLocator(ManagementServer.Name);
  _agentMgr = locator.getManager(AgentManager.class);
  _userVmMgr = locator.getManager(UserVmManager.class);
  _hostDao = locator.getDao(HostDao.class);
  _userVmDao = locator.getDao(UserVmDao.class);
  _volsDao = locator.getDao(VolumeDao.class);
  _capacityDao = locator.getDao(CapacityDao.class);
  _storagePoolDao = locator.getDao(StoragePoolDao.class);
  _storageManager = locator.getManager(StorageManager.class);
  _storagePoolHostDao = locator.getDao(StoragePoolHostDao.class);

  _executor = Executors.newScheduledThreadPool(3, new NamedThreadFactory("StatsCollector"));

  hostStatsInterval = NumbersUtil.parseLong(configs.get("host.stats.interval"), 60000L);
  hostAndVmStatsInterval = NumbersUtil.parseLong(configs.get("vm.stats.interval"), 60000L);
  storageStatsInterval = NumbersUtil.parseLong(configs.get("storage.stats.interval"), 60000L);
  volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), -1L);

  _executor.scheduleWithFixedDelay(
      new HostCollector(), 15000L, hostStatsInterval, TimeUnit.MILLISECONDS);
  _executor.scheduleWithFixedDelay(
      new VmStatsCollector(), 15000L, hostAndVmStatsInterval, TimeUnit.MILLISECONDS);
  _executor.scheduleWithFixedDelay(
      new StorageCollector(), 15000L, storageStatsInterval, TimeUnit.MILLISECONDS);

  // -1 means we don't even start this thread to pick up any data.
  if (volumeStatsInterval > 0) {
    _executor.scheduleWithFixedDelay(
        new VolumeCollector(), 15000L, volumeStatsInterval, TimeUnit.MILLISECONDS);
  } else {
    s_logger.info("Disabling volume stats collector");
  }
}
public static void runTask() {
  Runnable runnable =
      new Runnable() {
        public void run() {
          // task to run goes here
          try {
            String filePath = this.getClass().getResource("/").getPath();
            filePath = filePath + "douban-rating-300.txt";
            BookResult.runBookResult(filePath);
          } catch (TasteException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
          } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
          } catch (SQLException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
          }
        }
      };
  ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
  // The second argument is the delay before the first run; the third is the interval
  // between scheduled runs.
  service.scheduleAtFixedRate(runnable, 10, TIME, TimeUnit.SECONDS);
}
public static void main(String[] args) throws InterruptedException, ExecutionException {
  // *1
  ScheduledExecutorService service = Executors.newScheduledThreadPool(2);
  // *2
  Runnable task1 =
      new Runnable() {
        public void run() {
          System.out.println("Task repeating.");
        }
      };
  // *3 Run task1 repeatedly at a fixed rate (here, once per second)
  final ScheduledFuture future1 = service.scheduleAtFixedRate(task1, 0, 1, TimeUnit.SECONDS);
  // *4 After 10 seconds, cancel the repeating task
  ScheduledFuture future2 =
      service.schedule(
          new Callable() {
            public String call() {
              future1.cancel(true);
              return "task cancelled!";
            }
          },
          10,
          TimeUnit.SECONDS);
  System.out.println(future2.get());
  // *5
  service.shutdown();
}
@SuppressWarnings({"rawtypes", "unchecked"}) public void testMonitorAndContinueWithoutTimeout() { ScheduledFuture mockFuture = EasyMock.createMock(ScheduledFuture.class); ScheduledExecutorService schedulerMock = EasyMock.createMock(ScheduledExecutorService.class); expect( schedulerMock.scheduleWithFixedDelay( anyObject(Runnable.class), anyLong(), anyLong(), anyObject(TimeUnit.class))) .andReturn(mockFuture); replay(mockFuture); replay(schedulerMock); CoutingEventHandler handler = new CoutingEventHandler(); EventBus eventBus = new EventBus(); eventBus.register(handler); AsyncMonitor<Object> monitor = mockMonitor(schedulerMock, new Object(), mockFunction(MonitorStatus.CONTINUE), eventBus); assertNull(monitor.getFuture()); assertNull(monitor.getTimeout()); monitor.startMonitoring(null); assertNotNull(monitor.getFuture()); assertNull(monitor.getTimeout()); monitor.run(); assertEquals(handler.numCompletes, 0); assertEquals(handler.numFailures, 0); assertEquals(handler.numTimeouts, 0); verify(mockFuture); verify(schedulerMock); }
private void _initCommitScheduler() {
  if ((PropsValues.LUCENE_COMMIT_BATCH_SIZE <= 0)
      || (PropsValues.LUCENE_COMMIT_TIME_INTERVAL <= 0)) {
    return;
  }

  ScheduledExecutorService scheduledExecutorService =
      Executors.newSingleThreadScheduledExecutor();

  Runnable runnable =
      new Runnable() {
        @Override
        public void run() {
          try {
            if (_batchCount > 0) {
              _doCommit();
            }
          } catch (IOException ioe) {
            _log.error("Could not run scheduled commit", ioe);
          }
        }
      };

  scheduledExecutorService.scheduleWithFixedDelay(
      runnable, 0, PropsValues.LUCENE_COMMIT_TIME_INTERVAL, TimeUnit.MILLISECONDS);
}
@SuppressWarnings({"rawtypes", "unchecked"}) public void testStartMonitoringWithTimeout() { ScheduledFuture mockFuture = EasyMock.createMock(ScheduledFuture.class); ScheduledExecutorService schedulerMock = EasyMock.createMock(ScheduledExecutorService.class); expect( schedulerMock.scheduleWithFixedDelay( anyObject(Runnable.class), anyLong(), anyLong(), anyObject(TimeUnit.class))) .andReturn(mockFuture); replay(mockFuture); replay(schedulerMock); AsyncMonitor<Object> monitor = mockMonitor(schedulerMock, new Object(), mockFunction(MonitorStatus.DONE), new EventBus()); assertNull(monitor.getFuture()); assertNull(monitor.getTimeout()); monitor.startMonitoring(100L); assertNotNull(monitor.getFuture()); assertNotNull(monitor.getTimeout()); assertTrue(monitor.getTimeout() > 100L); verify(mockFuture); verify(schedulerMock); }
public static void main(String[] args) {
  // Initialize a ScheduledThreadPool with the factory method.
  ScheduledExecutorService newScheduledThreadPool = Executors.newScheduledThreadPool(2);
  TimerTask task1 =
      new TimerTask() {
        @Override
        public void run() {
          try {
            System.out.println("task1 invoked ! " + (System.currentTimeMillis() - start));
            Thread.sleep(3000);
          } catch (Exception e) {
            e.printStackTrace();
          }
        }
      };
  TimerTask task2 =
      new TimerTask() {
        @Override
        public void run() {
          System.out.println("task2 invoked ! " + (System.currentTimeMillis() - start));
        }
      };
  start = System.currentTimeMillis();
  newScheduledThreadPool.schedule(task1, 1000, TimeUnit.MILLISECONDS);
  newScheduledThreadPool.schedule(task2, 3000, TimeUnit.MILLISECONDS);
}
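// A hedged sketch, separate from the demo above, of why the pool size of 2 matters there:
// with a single-threaded scheduler, a long-running task delays every task scheduled after
// it. The class name and timings are illustrative assumptions.
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SingleThreadDelaySketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    long start = System.currentTimeMillis();
    scheduler.schedule(
        () -> {
          System.out.println("task1 at " + (System.currentTimeMillis() - start) + " ms");
          try {
            Thread.sleep(3000); // occupies the only worker thread for 3 seconds
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        },
        1000,
        TimeUnit.MILLISECONDS);
    // Scheduled for 3000 ms, but actually runs at roughly 4000 ms because the single
    // worker is still busy with task1.
    scheduler.schedule(
        () -> System.out.println("task2 at " + (System.currentTimeMillis() - start) + " ms"),
        3000,
        TimeUnit.MILLISECONDS);
    TimeUnit.SECONDS.sleep(5);
    scheduler.shutdown();
  }
}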
public void close() throws IOException {
  flush();

  if (!commitExecutor.isShutdown()) {
    commitExecutor.shutdown();
    try {
      if (!commitExecutor.awaitTermination(5, TimeUnit.MINUTES))
        throw new OException("Background data flush task can not be stopped.");
    } catch (InterruptedException e) {
      OLogManager.instance().error(this, "Data flush thread was interrupted");
      Thread.interrupted();
      throw new OException("Data flush thread was interrupted", e);
    }
  }

  synchronized (syncObject) {
    for (OFileClassic fileClassic : files.values()) {
      if (fileClassic.isOpen()) fileClassic.close();
    }

    if (nameIdMapHolder != null) {
      nameIdMapHolder.setLength(0);
      for (Map.Entry<String, Long> entry : nameIdMap.entrySet()) {
        writeNameIdEntry(new NameFileIdEntry(entry.getKey(), entry.getValue()), false);
      }
      nameIdMapHolder.getFD().sync();
      nameIdMapHolder.close();
    }
  }
}
// Start moving the BubbleView & updating the display
private void startMovement() {
  // Creates a WorkerThread
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
  // Execute the run() in Worker Thread every REFRESH_RATE milliseconds
  // Save reference to this job in mMoverFuture
  mMoverFuture =
      executor.scheduleWithFixedDelay(
          new Runnable() {
            @Override
            public void run() {
              // Each time this method is run the BubbleView should
              // move one step. If the BubbleView exits the display,
              // stop the BubbleView's Worker Thread.
              // Otherwise, request that the BubbleView be redrawn.
              while (moveWhileOnScreen()) {
                postInvalidate();
                try {
                  Thread.sleep(1000);
                } catch (InterruptedException e) {
                  mFrame.removeView(BubbleView.this);
                }
              }
              stopMovement(false);
            }
          },
          0,
          REFRESH_RATE,
          TimeUnit.MILLISECONDS);
}
public void delete() throws IOException {
  synchronized (syncObject) {
    for (long fileId : files.keySet()) doDeleteFile(fileId);

    if (nameIdMapHolderFile != null) {
      nameIdMapHolder.close();

      if (!nameIdMapHolderFile.delete())
        throw new OStorageException(
            "Can not delete disk cache file which contains name-id mapping.");
    }
  }

  if (!commitExecutor.isShutdown()) {
    commitExecutor.shutdown();
    try {
      if (!commitExecutor.awaitTermination(5, TimeUnit.MINUTES))
        throw new OException("Background data flush task can not be stopped.");
    } catch (InterruptedException e) {
      OLogManager.instance().error(this, "Data flush thread was interrupted");
      Thread.interrupted();
      throw new OException("Data flush thread was interrupted", e);
    }
  }
}
/**
 * {@inheritDoc}
 *
 * @throws IOException when an instance in the configuration cannot be opened and closed.
 */
@Override
public final void run(final KijiRESTConfiguration configuration, final Environment environment)
    throws IOException {
  final KijiURI clusterURI = KijiURI.newBuilder(configuration.getClusterURI()).build();

  // Load specified instances and health checks for each.
  final Set<KijiURI> instances =
      InstanceUtil.getInstances(
          clusterURI,
          new InstancesMapTo<KijiURI>() {
            public KijiURI apply(KijiURI instanceURI) throws IOException {
              InstanceUtil.openAndCloseInstance(instanceURI);
              LOG.info("Loading instance {} upon startup.", instanceURI.toOrderedString());
              environment.addHealthCheck(new InstanceHealthCheck(instanceURI));
              return instanceURI;
            }
          });

  final ManagedKijiClient managedKijiClient = new ManagedKijiClient(instances);
  environment.manage(managedKijiClient);

  // Remove all built-in Dropwizard exception mappers; always depend on custom ones.
  // Inspired by Jeremy Whitlock's suggestion on thoughtspark.org.
  Set<Object> jerseyResources = environment.getJerseyResourceConfig().getSingletons();
  Iterator<Object> jerseyResourcesIterator = jerseyResources.iterator();
  while (jerseyResourcesIterator.hasNext()) {
    Object jerseyResource = jerseyResourcesIterator.next();
    if (jerseyResource instanceof ExceptionMapper
        && jerseyResource.getClass().getName().startsWith("com.yammer.dropwizard.jersey")) {
      jerseyResourcesIterator.remove();
    }
  }

  // Update instances periodically.
  final RefreshInstances instanceRefresher = new RefreshInstances(clusterURI, managedKijiClient);
  ScheduledExecutorService scheduler =
      environment.managedScheduledExecutorService("instance_refresh_scheduler", 1);
  scheduler.scheduleAtFixedRate(
      instanceRefresher,
      INSTANCE_REFRESH_PERIOD_MINUTES, // Start a period from now.
      INSTANCE_REFRESH_PERIOD_MINUTES,
      MINUTES);

  // Load admin task to manually update instances.
  environment.addTask(new RefreshInstancesTask(instanceRefresher));

  // Load resources.
  for (KijiRestPlugin plugin : Lookups.get(KijiRestPlugin.class)) {
    LOG.info("Loading plugin {}", plugin.getClass());
    plugin.install(managedKijiClient, configuration, environment);
  }

  // Allow global CORS filter. CORS off by default.
  if (configuration.getCORS()) {
    environment.addFilter(
        CrossOriginFilter.class, configuration.getHttpConfiguration().getRootPath());
    LOG.info("Global cross-origin resource sharing is allowed.");
  }
}
public static void main(String[] args) throws Exception {
  final String[] arguments = args;
  ScheduledExecutorService executor = Executors.newScheduledThreadPool(3);
  executor.scheduleAtFixedRate(
      new Runnable() {
        public void run() {
          try {
            AutoScaleCluster cluster = new AutoScaleCluster();
            JCommander jc = new JCommander(cluster);
            jc.setProgramName("AutoScaleCluster");
            try {
              jc.parse(arguments);
            } catch (ParameterException e) {
              System.err.println(e.getMessage());
              jc.usage();
              System.exit(-1);
            }
            cluster.start();
          } catch (Exception ex) {
            ex.printStackTrace(); // or a logger would be better
          }
        }
      },
      0,
      1,
      TimeUnit.MINUTES);
}