void start() {
    if (t == null) {
        t = new Thread(this, "UDP.OutgoingPacketHandler thread");
        t.setDaemon(true);
        t.start();
    }
}
private static void testPotato(
        Class<? extends Collection> implClazz, Class<? extends List> argClazz) throws Throwable {
    try {
        System.out.printf("implClazz=%s, argClazz=%s\n", implClazz.getName(), argClazz.getName());
        final int iterations = 100000;
        final List<Integer> list = (List<Integer>) argClazz.newInstance();
        final Integer one = Integer.valueOf(1);
        final List<Integer> oneElementList = Collections.singletonList(one);
        final Constructor<? extends Collection> constr = implClazz.getConstructor(Collection.class);
        final Thread t =
                new CheckedThread() {
                    public void realRun() {
                        for (int i = 0; i < iterations; i++) {
                            list.add(one);
                            list.remove(one);
                        }
                    }
                };
        t.setDaemon(true);
        t.start();
        for (int i = 0; i < iterations; i++) {
            Collection<?> coll = constr.newInstance(list);
            Object[] elts = coll.toArray();
            check(elts.length == 0 || (elts.length == 1 && elts[0] == one));
        }
    } catch (Throwable t) {
        unexpected(t);
    }
}
@Override
public void sendFileAsync(File file, Message message, Consumer<Message> callback) {
    checkVerification();
    if (!checkPermission(getJDA().getSelfInfo(), Permission.MESSAGE_WRITE))
        throw new PermissionException(Permission.MESSAGE_WRITE);
    if (!checkPermission(getJDA().getSelfInfo(), Permission.MESSAGE_ATTACH_FILES))
        throw new PermissionException(Permission.MESSAGE_ATTACH_FILES);

    Thread thread =
            new Thread(
                    () -> {
                        Message messageReturn;
                        try {
                            messageReturn = sendFile(file, message);
                        } catch (RateLimitedException e) {
                            JDAImpl.LOG.warn(
                                    "Got ratelimited when trying to upload file. Providing null to callback.");
                            messageReturn = null;
                        }
                        if (callback != null) callback.accept(messageReturn);
                    });
    thread.setName("TextChannelImpl sendFileAsync Channel: " + id);
    thread.setDaemon(true);
    thread.start();
}
/**
 * 'handler' can be of any type that implements 'exportedInterface', but only methods declared by
 * the interface (and its superinterfaces) will be invocable.
 */
public <T> InAppServer(
        String name, String portFilename, InetAddress inetAddress, Class<T> exportedInterface, T handler) {
    this.fullName = name + "Server";
    this.exportedInterface = exportedInterface;
    this.handler = handler;

    // In the absence of authentication, we shouldn't risk starting a server as root.
    if (System.getProperty("user.name").equals("root")) {
        Log.warn("InAppServer: refusing to start unauthenticated server \"" + fullName + "\" as root!");
        return;
    }

    try {
        File portFile = FileUtilities.fileFromString(portFilename);
        secretFile = new File(portFile.getPath() + ".secret");
        Thread serverThread = new Thread(new ConnectionAccepter(portFile, inetAddress), fullName);
        // If there are no other threads left, the InApp server shouldn't keep us alive.
        serverThread.setDaemon(true);
        serverThread.start();
    } catch (Throwable th) {
        Log.warn("InAppServer: couldn't start \"" + fullName + "\".", th);
    }
    writeNewSecret();
}
public void start() {
    _mngmtLock.writeLock().lock();
    try {
        if (!checkState(State.INIT, State.RUNNING)) {
            __log.debug("start() ignored -- already started");
            return;
        }
        __log.debug("BPEL SERVER starting.");

        // Possibly run some migrations before starting.
        if (!(new MigrationHandler(_contexts)
                .migrate(_registeredProcesses, _migrationTransactionTimeout))) {
            throw new BpelEngineException(
                    "An error occurred while migrating your database to a newer version of the server. "
                            + "Please make sure that the required migration scripts have been executed before starting the server.");
        }

        _state = State.RUNNING;
        __log.info(__msgs.msgServerStarted());
        if (_dehydrationPolicy != null) {
            processDefReaper = new Thread(new ProcessDefReaper(), "Dehydrator");
            processDefReaper.setDaemon(true);
            processDefReaper.start();
        }
    } finally {
        _mngmtLock.writeLock().unlock();
    }
}
/**
 * @param views Guaranteed to be non-null and to have >= 2 members, or else this thread would
 *     not be started
 */
public synchronized void start(Map<Address, View> views) {
    if (thread == null || thread.isAlive()) {
        this.coords.clear();

        // Now remove all members which don't have us in their view, so RPCs won't block (e.g. FLUSH)
        // https://jira.jboss.org/browse/JGRP-1061
        sanitizeViews(views);

        // Add all different coordinators of the views into the hashmap and set their members:
        Collection<Address> coordinators = Util.determineMergeCoords(views);
        for (Address coord : coordinators) {
            View view = views.get(coord);
            if (view != null) this.coords.put(coord, new ArrayList<Address>(view.getMembers()));
        }

        // For the merge participants which are not coordinators, we simply add them, and the
        // associated membership list consists only of themselves
        Collection<Address> merge_participants = Util.determineMergeParticipants(views);
        merge_participants.removeAll(coordinators);
        for (Address merge_participant : merge_participants) {
            Collection<Address> tmp = new ArrayList<Address>();
            tmp.add(merge_participant);
            coords.putIfAbsent(merge_participant, tmp);
        }

        thread = gms.getThreadFactory().newThread(this, "MergeTask");
        thread.setDaemon(true);
        thread.start();
    }
}
/* (non-Javadoc)
 * @see net.xeoh.plugins.remotediscovery.impl.v4.probes.AbstractProbe#startup()
 */
@Override
public void startup() {
    super.startup();

    final Thread thread =
            new Thread(
                    new Runnable() {
                        @Override
                        public void run() {
                            backgroundInit();
                        }
                    });
    thread.setDaemon(true);
    thread.start();

    this.timer = new Timer();
    this.timer.schedule(
            new TimerTask() {
                @Override
                public void run() {
                    discoverThread();
                }
            },
            0,
            260);
}
public SegmentCacheManager(MondrianServer server) {
    this.server = server;
    ACTOR = new Actor();
    thread = new Thread(ACTOR, "mondrian.rolap.agg.SegmentCacheManager$ACTOR");
    thread.setDaemon(true);
    thread.start();

    // Create the index registry.
    this.indexRegistry = new SegmentCacheIndexRegistry();

    // Add a local cache, if needed.
    if (!MondrianProperties.instance().DisableCaching.get()) {
        final MemorySegmentCache cache = new MemorySegmentCache();
        segmentCacheWorkers.add(new SegmentCacheWorker(cache, thread));
    }

    // Add an external cache, if configured.
    final List<SegmentCache> externalCache = SegmentCacheWorker.initCache();
    for (SegmentCache cache : externalCache) {
        // Create a worker for this external cache
        segmentCacheWorkers.add(new SegmentCacheWorker(cache, thread));
        // Hook up a listener so it can update the segment index.
        cache.addListener(new AsyncCacheListener(this, server));
    }

    compositeCache = new CompositeSegmentCache(segmentCacheWorkers);
}
private void obtaindate() {
    Thread th =
            new Thread() {
                @Override
                public void run() {
                    while (true) {
                        Calendar cd = new GregorianCalendar();
                        int mon = cd.get(Calendar.MONTH);
                        mon++;
                        int year = cd.get(Calendar.YEAR);
                        int day = cd.get(Calendar.DAY_OF_MONTH);
                        date_text.setText("" + day + "/" + mon + "/" + year);
                        int hh = cd.get(Calendar.HOUR_OF_DAY);
                        int min = cd.get(Calendar.MINUTE);
                        int sec = cd.get(Calendar.SECOND);
                        time_txt.setText("" + hh + ":" + min + ":" + sec);
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException ex) {
                            Logger.getLogger(Amin.class.getName()).log(Level.SEVERE, null, ex);
                        }
                    }
                }
            };
    th.setDaemon(true);
    th.start();
}
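// If date_text and time_txt are Swing components (an assumption; the snippet above does not say
// which toolkit they belong to), calling setText from a background thread is unsafe: Swing
// components must only be updated on the Event Dispatch Thread. Below is a minimal, hypothetical
// sketch of the same once-per-second clock using javax.swing.Timer, whose callback runs on the EDT.
import java.awt.BorderLayout;
import java.text.SimpleDateFormat;
import java.util.Date;
import javax.swing.JFrame;
import javax.swing.JLabel;
import javax.swing.SwingUtilities;
import javax.swing.Timer;

class ClockSketch {
    private final JLabel dateLabel = new JLabel("date");
    private final JLabel timeLabel = new JLabel("time");

    void startClock() {
        // Fires every 1000 ms on the EDT, so setText is safe here.
        Timer timer = new Timer(1000, e -> {
            Date now = new Date();
            dateLabel.setText(new SimpleDateFormat("d/M/yyyy").format(now));
            timeLabel.setText(new SimpleDateFormat("H:mm:ss").format(now));
        });
        timer.setInitialDelay(0);
        timer.start();
    }

    public static void main(String[] args) {
        SwingUtilities.invokeLater(() -> {
            ClockSketch sketch = new ClockSketch();
            JFrame frame = new JFrame("clock sketch");
            frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
            frame.add(sketch.dateLabel, BorderLayout.NORTH);
            frame.add(sketch.timeLabel, BorderLayout.SOUTH);
            frame.pack();
            frame.setVisible(true);
            sketch.startClock();
        });
    }
}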
@Override
public Thread newThread(Runnable r) {
    Thread t = new Thread(r);
    t.setDaemon(true);
    t.setName("shell-bolt-heartbeat");
    return t;
}
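// A minimal, self-contained sketch (class, thread, and task names are hypothetical) of how a
// daemon ThreadFactory like the newThread override above is typically supplied to an executor,
// so the periodic heartbeat cannot keep the JVM alive on its own:
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;

class HeartbeatSketch {
    public static void main(String[] args) throws InterruptedException {
        ThreadFactory daemonFactory = r -> {
            Thread t = new Thread(r);
            t.setDaemon(true);            // dies with the JVM
            t.setName("heartbeat-sketch");
            return t;
        };
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(daemonFactory);
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("heartbeat"), 0, 1, TimeUnit.SECONDS);
        Thread.sleep(3_000); // keep the demo's main thread alive briefly
        scheduler.shutdown();
    }
}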
public void start() {
    if (thread == null) {
        thread = new Thread(this, "UDP.UcastReceiverThread");
        thread.setDaemon(true);
        running = true;
        thread.start();
    }
}
public Notifier(String listenNodePath) {
    super.setDaemon(true);
    super.setName("LTSRedisSubscribe");
    this.listenNodePath = listenNodePath;
    if (monitorId == null) {
        monitorId = listenNodePath;
    }
}
@Override
protected void setUp() throws Exception {
    tmp = IO.getFile("generated/tmp");
    tmp.mkdirs();
    IO.copy(IO.getFile("testdata/ws"), tmp);
    workspace = Workspace.getWorkspace(tmp);
    workspace.refresh();

    InfoRepository repo = workspace.getPlugin(InfoRepository.class);
    t1 = create("bsn-1", new Version(1, 0, 0));
    t2 = create("bsn-2", new Version(1, 0, 0));
    repo.put(new FileInputStream(t1), null);
    repo.put(new FileInputStream(t2), null);
    t1 = repo.get("bsn-1", new Version(1, 0, 0), null);
    t2 = repo.get("bsn-2", new Version(1, 0, 0), null);
    repo.put(new FileInputStream(IO.getFile("generated/biz.aQute.remote.launcher.jar")), null);
    workspace.getPlugins().add(repo);

    File storage = IO.getFile("generated/storage-1");
    storage.mkdirs();

    configuration = new HashMap<String, Object>();
    configuration.put(Constants.FRAMEWORK_STORAGE_CLEAN, Constants.FRAMEWORK_STORAGE_CLEAN_ONFIRSTINIT);
    configuration.put(Constants.FRAMEWORK_STORAGE, storage.getAbsolutePath());
    configuration.put(Constants.FRAMEWORK_SYSTEMPACKAGES_EXTRA, "org.osgi.framework.launch;version=1.2");

    framework = new org.apache.felix.framework.FrameworkFactory().newFramework(configuration);
    framework.init();
    framework.start();
    context = framework.getBundleContext();

    location = "reference:" + IO.getFile("generated/biz.aQute.remote.agent.jar").toURI().toString();
    agent = context.installBundle(location);
    agent.start();

    thread =
            new Thread() {
                @Override
                public void run() {
                    try {
                        Main.main(
                                new String[] {
                                    "-s", "generated/storage", "-c", "generated/cache", "-p", "1090", "-et"
                                });
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            };
    thread.setDaemon(true);
    thread.start();
    super.setUp();
}
/** Mutes or unmutes the associated <tt>Call</tt> upon clicking this button. */
public void toggleMute() {
    if (muteRunner == null) {
        muteRunner = new Thread(this, getToolTipText());
        muteRunner.setDaemon(true);
        setEnabled(false);
        muteRunner.start();
    }
}
synchronized void start() {
    if (queue.closed()) queue.reset();
    if (thread == null || !thread.isAlive()) {
        thread = getThreadFactory().newThread(this, "ViewHandler");
        // thread cannot terminate if we have tasks left, e.g. when we as coord leave
        thread.setDaemon(false);
        thread.start();
    }
}
public void startConsole(boolean jLine) {
    this.jLine = jLine;
    sender = new ColoredCommandSender();

    Thread thread = new ConsoleCommandThread();
    thread.setName("ConsoleCommandThread");
    thread.setDaemon(true);
    thread.start();
}
@Test
public void par_update_get_compact() throws InterruptedException {
    int scale = TT.scale();
    if (scale == 0) return;
    int threadNum = Math.min(4, scale * 4);
    final long end = TT.nowPlusMinutes(10);
    e = openEngine();
    final BlockingQueue<Fun.Pair<Long, byte[]>> q = new ArrayBlockingQueue(threadNum * 10);
    for (int i = 0; i < threadNum; i++) {
        byte[] b = TT.randomByteArray(new Random().nextInt(10000));
        long recid = e.put(b, BYTE_ARRAY_NOSIZE);
        q.put(new Fun.Pair(recid, b));
    }
    final CountDownLatch l = new CountDownLatch(2);
    Thread tt =
            new Thread() {
                @Override
                public void run() {
                    try {
                        while (l.getCount() > 1) e.compact();
                    } finally {
                        l.countDown();
                    }
                }
            };
    tt.setDaemon(true);
    // start() rather than run(): calling run() would execute the compaction loop synchronously on
    // the test thread and block it, instead of compacting concurrently with the updates below.
    tt.start();

    Exec.execNTimes(
            threadNum,
            new Callable() {
                @Override
                public Object call() throws Exception {
                    Random r = new Random();
                    while (System.currentTimeMillis() < end) {
                        Fun.Pair<Long, byte[]> t = q.take();
                        assertTrue(Serializer.BYTE_ARRAY.equals(t.b, e.get(t.a, Serializer.BYTE_ARRAY_NOSIZE)));
                        int size = r.nextInt(1000);
                        if (r.nextInt(10) == 1) size = size * 100;
                        byte[] b = TT.randomByteArray(size);
                        e.update(t.a, b, Serializer.BYTE_ARRAY_NOSIZE);
                        q.put(new Fun.Pair<Long, byte[]>(t.a, b));
                    }
                    return null;
                }
            });

    l.countDown();
    l.await();

    for (Fun.Pair<Long, byte[]> t : q) {
        assertTrue(Serializer.BYTE_ARRAY.equals(t.b, e.get(t.a, Serializer.BYTE_ARRAY_NOSIZE)));
    }
    e.close();
}
public synchronized void connect(long timeout) throws TransportException {
    try {
        switch (state) {
            case 0:
                break;
            case 3:
                return; // already connected
            case 4:
                state = 0;
                throw new TransportException("Connection in error", te);
            default:
                TransportException te = new TransportException("Invalid state: " + state);
                state = 0;
                throw te;
        }

        state = 1;
        te = null;
        thread = new Thread(this, name);
        thread.setDaemon(true);

        synchronized (thread) {
            thread.start();
            thread.wait(timeout); /* wait for doConnect */

            switch (state) {
                case 1: /* doConnect never returned */
                    state = 0;
                    thread = null;
                    throw new TransportException("Connection timeout");
                case 2:
                    if (te != null) { /* doConnect throw Exception */
                        state = 4; /* error */
                        thread = null;
                        throw te;
                    }
                    state = 3; /* Success! */
                    return;
            }
        }
    } catch (InterruptedException ie) {
        state = 0;
        thread = null;
        throw new TransportException(ie);
    } finally {
        /* This guarantees that we leave in a valid state */
        if (state != 0 && state != 3 && state != 4) {
            if (log.level >= 1) log.println("Invalid state: " + state);
            state = 0;
            thread = null;
        }
    }
}
public EventListModel() {
    eventConnector = new EventConnector("event_schema.txt");
    eventConnector.setListener(this);
    eventControl = new EventLoggerControl();
    eventControl.stop(65535);
    Thread thread = new Thread(this);
    thread.setDaemon(true);
    thread.start();
}
/** Enqueues the Runnable object, and executes it on a processor thread. */
public void dispatch(Runnable runner, boolean isLIFO) {
    // Note: the isLIFO argument is unconditionally overridden here, so entries are always enqueued FIFO.
    isLIFO = false;
    synchronized (queue) {
        if (threadCount < maxThreadCount) {
            if (isLIFO) {
                queue.addFirst(runner);
            } else {
                queue.addLast(runner);
            }
            Thread processor =
                    new Thread(this + " Processor") {
                        public void run() {
                            processEvents();
                        }
                    };
            threadCount++;
            // The processor thread must not be a daemon,
            // or else the Java VM might stop before
            // all runnables have been processed.
            try {
                processor.setDaemon(false);
            } catch (SecurityException e) {
                e.printStackTrace();
            }
            try {
                processor.setPriority(priority);
            } catch (SecurityException e) {
                e.printStackTrace();
            }
            processor.start();
            return;
        } else if (blockingPolicy == ENQUEUE_WHEN_BLOCKED) {
            if (isLIFO) {
                queue.addFirst(runner);
            } else {
                queue.addLast(runner);
            }
            return;
        }
    }
    // implicit: if (threadCount >= maxThreadCount && blockingPolicy == RUN_WHEN_BLOCKED)
    runner.run();
}
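// The run-on-the-caller fallback at the end of dispatch() is the same idea as the standard
// library's ThreadPoolExecutor with a CallerRunsPolicy rejection handler. A rough, hypothetical
// equivalent using java.util.concurrent (pool sizes and queue capacity are made-up values):
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class CallerRunsSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor pool =
                new ThreadPoolExecutor(
                        2, 4,                                       // core and max pool size
                        60, TimeUnit.SECONDS,                       // idle keep-alive
                        new ArrayBlockingQueue<>(16),               // bounded work queue
                        new ThreadPoolExecutor.CallerRunsPolicy()); // run on the submitter when saturated
        for (int i = 0; i < 100; i++) {
            final int task = i;
            pool.execute(() -> System.out.println("task " + task + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
    }
}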
static {
    // Create and start thread for actor.
    //
    // Actor is shared between all servers. This reduces concurrency, but
    // not a concern because monitoring events are not very numerous.
    // We tried creating one actor (and therefore thread) per server, but
    // some applications (and in particular some tests) create lots of
    // servers.
    //
    // The actor is shut down with the JVM.
    final Thread thread = new Thread(ACTOR, "Mondrian Monitor");
    thread.setDaemon(true);
    thread.start();
}
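// A minimal, generic sketch of the shared "actor" pattern used above: one daemon thread, owned by
// a static initializer, drains a queue of events for the whole process. Class, field, and event
// names here are hypothetical; the real Actor implementation is not shown in this snippet.
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

final class MonitorSketch {
    private static final BlockingQueue<String> EVENTS = new LinkedBlockingQueue<>();

    static {
        Thread actor = new Thread(() -> {
            try {
                while (true) {
                    // Blocks until an event arrives; the daemon flag still lets the JVM exit.
                    String event = EVENTS.take();
                    System.out.println("handled: " + event);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status and stop
            }
        }, "Monitor Sketch Actor");
        actor.setDaemon(true);
        actor.start();
    }

    static void send(String event) {
        EVENTS.offer(event);
    }

    private MonitorSketch() {}
}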
/**
 * Asynchronously sends a message to a single server, registering a listener to receive a callback
 * on success or exception. Multiple asynchronous lookups can be performed in parallel. Since the
 * callback may be invoked before the function returns, external synchronization is necessary.
 *
 * @param query The query to send
 * @param listener The object containing the callbacks.
 * @return An identifier, which is also a parameter in the callback
 */
public Object sendAsync(final Message query, final ResolverListener listener) {
    final Object id;
    synchronized (this) {
        id = new Integer(uniqueID++);
    }
    Record question = query.getQuestion();
    String qname;
    if (question != null) qname = question.getName().toString();
    else qname = "(none)";
    String name = this.getClass() + ": " + qname;
    Thread thread = new ResolveThread(this, query, id, listener);
    thread.setName(name);
    thread.setDaemon(true);
    thread.start();
    return id;
}
/**
 * Creates and starts the {@link #sendKeepAliveMessageThread} which is to send STUN keep-alive
 * <tt>Message</tt>s to the STUN server associated with the <tt>StunCandidateHarvester</tt> of
 * this instance in order to keep the <tt>Candidate</tt>s harvested by this instance alive.
 */
private void createSendKeepAliveMessageThread() {
    synchronized (sendKeepAliveMessageSyncRoot) {
        Thread t = new SendKeepAliveMessageThread(this);
        t.setDaemon(true);
        t.setName(getClass().getName() + ".sendKeepAliveMessageThread: " + hostCandidate);

        boolean started = false;
        sendKeepAliveMessageThread = t;
        try {
            t.start();
            started = true;
        } finally {
            if (!started && (sendKeepAliveMessageThread == t)) sendKeepAliveMessageThread = null;
        }
    }
}
@Override
public boolean start() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> AtlasTagSource.start()");
    }
    if (consumerTask == null) {
        LOG.error("No consumerTask!!!");
    } else {
        myThread = new Thread(consumerTask);
        myThread.setDaemon(true);
        myThread.start();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("<== AtlasTagSource.start()");
    }
    return myThread != null;
}
/** Re-initializes the stack instance. */
protected void reInit() {
    super.reInit();
    clientTransactions = Collections.synchronizedList(new ArrayList<SIPClientTransaction>());
    serverTransactions = Collections.synchronizedList(new ArrayList<SIPServerTransaction>());
    pendingTransactions = new HashSet<SIPServerTransaction>();
    pendingRecords = Collections.synchronizedList(new ArrayList<PendingRecord>());
    clientTransactionTable = new Hashtable<String, SIPTransaction>();
    serverTransactionTable = new Hashtable<String, SIPTransaction>();
    // Dialog table.
    this.dialogTable = new Hashtable<String, SIPDialog>();
    this.timer = new Timer();
    pendingRecordScanner = new Thread(new PendingRecordScanner(this));
    pendingRecordScanner.setDaemon(true);
    pendingRecordScanner.setName("PendingRecordScanner");
    pendingRecordScanner.start();
}
public static void main(PeerFactory peerFactory, KeyStore keyStore, String[] args) throws Exception {
    long time = System.currentTimeMillis();
    String relPath;
    if ((args != null) && (args.length > 0) && args[0].equals("sh")) {
        relPath = pathToStoresSH;
    } else {
        relPath = pathToStores;
    }
    PATH = new File(System.getProperty("test.src", "."), relPath);
    CipherTest.peerFactory = peerFactory;
    System.out.print("Initializing test '" + peerFactory.getName() + "'...");
    // secureRandom = new SecureRandom();
    // secureRandom.nextInt();
    // trustStore = readKeyStore(trustStoreFile);
    CipherTest.keyStore = keyStore;
    // keyStore = readKeyStore(keyStoreFile);

    KeyManagerFactory keyFactory =
            KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
    keyFactory.init(keyStore, "test12".toCharArray());
    keyManager = (X509ExtendedKeyManager) keyFactory.getKeyManagers()[0];

    TrustManagerFactory tmf =
            TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
    tmf.init(keyStore);
    trustManager = (X509TrustManager) tmf.getTrustManagers()[0];
    // trustManager = new AlwaysTrustManager();

    SSLContext context = SSLContext.getInstance("TLS");
    context.init(new KeyManager[] {keyManager}, new TrustManager[] {trustManager}, null);
    SSLContext.setDefault(context);

    CipherTest cipherTest = new CipherTest(peerFactory);
    Thread serverThread = new Thread(peerFactory.newServer(cipherTest), "Server");
    serverThread.setDaemon(true);
    serverThread.start();
    System.out.println("Done");
    cipherTest.run();
    time = System.currentTimeMillis() - time;
    System.out.println("Done. (" + time + " ms)");
}
/**
 * Initializes the server stored list. Synchronize server stored groups and contacts with the
 * local groups and contacts.
 */
@Override
public void init() {
    try {
        SipAccountIDImpl accountID = (SipAccountIDImpl) sipProvider.getAccountID();
        if (!accountID.isXiVOEnable()) return;

        boolean useSipCredentials = accountID.isClistOptionUseSipCredentials();
        String serverAddress = accountID.getClistOptionServerUri();
        String username = accountID.getAccountPropertyString(ProtocolProviderFactory.USER_ID);
        Address userAddress = sipProvider.parseAddressString(username);

        if (useSipCredentials) {
            username = ((SipUri) userAddress.getURI()).getUser();
        } else {
            username = accountID.getClistOptionUser();
        }

        try {
            connect(serverAddress);
        } catch (Throwable ex) {
            showError(ex, null, null);
            logger.error("Error connecting to server", ex);
            return;
        }

        Thread thread = new Thread(this, this.getClass().getName());
        thread.setDaemon(true);
        thread.start();

        if (!login(username)) {
            showError(null, null, "Unauthorized. Cannot login.");
            logger.error("Cannot login.");
            return;
        }
    } catch (Throwable t) {
        logger.error("Error init clist from xivo server");
    }
}
/** Starts the unicast and multicast receiver threads */
void startThreads() throws Exception {
    if (ucast_receiver == null) {
        // start the listener thread of the ucast_recv_sock
        ucast_receiver = new UcastReceiver();
        ucast_receiver.start();
        if (Trace.trace) {
            Trace.info("UDP.startThreads()", "created unicast receiver thread");
        }
    }

    if (ip_mcast) {
        if (mcast_receiver != null) {
            if (mcast_receiver.isAlive()) {
                if (Trace.trace) {
                    Trace.info(
                            "UDP.createThreads()",
                            "did not create new multicastreceiver thread as existing "
                                    + "multicast receiver thread is still running");
                }
            } else {
                mcast_receiver = null; // will be created just below...
            }
        }
        if (mcast_receiver == null) {
            mcast_receiver = new Thread(this, "UDP mcast receiver");
            mcast_receiver.setPriority(Thread.MAX_PRIORITY); // needed ????
            mcast_receiver.setDaemon(true);
            mcast_receiver.start();
        }
    }

    if (use_outgoing_packet_handler) {
        outgoing_packet_handler.start();
    }
    if (use_incoming_packet_handler) {
        incoming_packet_handler.start();
    }
}
public void start() {
    inputs = new AtomicReferenceArray<>(0);
    outputs = inputs;
    freeReceptors = new CopyOnWriteArrayList<>();
    sequence = 0;
    finalSequence = 0;
    finalProduct = null;
    reuseReceptors = new ArrayList<>(0);
    executor = Executors.newSingleThreadExecutor();
    started = true;

    if (poolSize > 0) {
        group = new ThreadGroup("FatPipe");
        group.setMaxPriority(Thread.MAX_PRIORITY);
    }

    signals = new SignalImpl[ARBITARY_THREADS + poolSize];
    threads = new Thread[ARBITARY_THREADS + poolSize];
    startTime = System.currentTimeMillis();
    sleptTime = 0;
    toSleepTime = 0;

    for (int i = 0; i < poolSize; i++) {
        Thread t = new Thread(group, this, name + "-" + i);
        t.setDaemon(true);
        threads[i] = t;
        t.start();
        if (sleep == 0) t.setPriority(Thread.MAX_PRIORITY);
        else if (sleep < 0) t.setPriority(Thread.NORM_PRIORITY);
        else t.setPriority(Thread.MIN_PRIORITY);
    }

    for (int i = 0; i < signals.length; i++) {
        signals[i] = new SignalImpl(lock);
    }

    if (pool != null) pool.add(this);
    lock.release();
}
/**
 * Runs a test query against the stun server. If it works we set useStun to true, otherwise we set
 * it to false.
 */
private void launchStunServerTest() {
    Thread stunServerTestThread =
            new Thread("StunServerTestThread") {
                public void run() {
                    DatagramSocket randomSocket = initRandomPortSocket();
                    try {
                        StunAddress stunAddress = detector.getMappingFor(randomSocket);
                        randomSocket.disconnect();
                        if (stunAddress != null) {
                            useStun = true;
                            logger.trace(
                                    "StunServer check succeeded for server: "
                                            + detector.getServerAddress()
                                            + " and local port: "
                                            + randomSocket.getLocalPort());
                        } else {
                            useStun = false;
                            logger.trace(
                                    "StunServer check failed for server: "
                                            + detector.getServerAddress()
                                            + " and local port: "
                                            + randomSocket.getLocalPort()
                                            + ". No address returned by server.");
                        }
                    } catch (Throwable ex) {
                        logger.error(
                                "Failed to run a stun query against server :" + detector.getServerAddress(),
                                ex);
                        if (randomSocket.isConnected()) randomSocket.disconnect();
                        useStun = false;
                    }
                }
            };
    stunServerTestThread.setDaemon(true);
    stunServerTestThread.start();
}