void checkThreadInfo(ThreadInfo info) {
    if (!getName().equals(info.getThreadName())) {
        throw new RuntimeException("Name: " + info.getThreadName()
            + " not matched. Expected: " + getName());
    }

    MonitorInfo[] monitors = info.getLockedMonitors();
    if (monitors.length != OWNED_MONITORS) {
        throw new RuntimeException("Number of locked monitors = " + monitors.length
            + " not matched. Expected: " + OWNED_MONITORS);
    }

    MonitorInfo m = monitors[0];
    StackTraceElement ste = m.getLockedStackFrame();
    int depth = m.getLockedStackDepth();
    StackTraceElement[] stacktrace = info.getStackTrace();
    if (!ste.equals(stacktrace[depth])) {
        System.out.println("LockedStackFrame:- " + ste);
        System.out.println("StackTrace at " + depth + " :-" + stacktrace[depth]);
        throw new RuntimeException("LockedStackFrame does not match "
            + "stack frame in ThreadInfo.getStackTrace");
    }

    String className = lock.getClass().getName();
    int hcode = System.identityHashCode(lock);
    if (!className.equals(m.getClassName())
            || hcode != m.getIdentityHashCode()
            || !m.getLockedStackFrame().getMethodName().equals("run")) {
        System.out.println(info);
        throw new RuntimeException("MonitorInfo " + m + " doesn't match.");
    }

    LockInfo[] syncs = info.getLockedSynchronizers();
    if (syncs.length != OWNED_SYNCS) {
        throw new RuntimeException("Number of locked syncs = " + syncs.length
            + " not matched. Expected: " + OWNED_SYNCS);
    }

    AbstractOwnableSynchronizer s = mutex.getSync();
    String lockName = s.getClass().getName();
    hcode = System.identityHashCode(s);
    if (!lockName.equals(syncs[0].getClassName())) {
        throw new RuntimeException("LockInfo : " + syncs[0]
            + " class name not matched. Expected: " + lockName);
    }
    if (hcode != syncs[0].getIdentityHashCode()) {
        throw new RuntimeException("LockInfo: " + syncs[0]
            + " IdentityHashCode not matched. Expected: " + hcode);
    }

    LockInfo li = info.getLockInfo();
    if (li == null) {
        throw new RuntimeException("Expected non-null LockInfo");
    }
}
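// A minimal, hedged sketch of how a ThreadInfo like the one checked above can be
// obtained (this probe class is illustrative, not part of the original test).
// ThreadMXBean only reports locked monitors and ownable synchronizers when they
// are requested explicitly via the (long[], boolean, boolean) overload.
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadInfoProbe {
    public static void main(String[] args) {
        ThreadMXBean tmx = ManagementFactory.getThreadMXBean();
        Object monitor = new Object();
        synchronized (monitor) {
            // The plain getThreadInfo(long) overloads leave the monitor and
            // synchronizer arrays empty; pass true, true to populate them.
            ThreadInfo[] infos = tmx.getThreadInfo(
                new long[] {Thread.currentThread().getId()}, true, true);
            ThreadInfo info = infos[0];
            System.out.println(info.getThreadName()
                + ": lockedMonitors=" + info.getLockedMonitors().length
                + ", lockedSynchronizers=" + info.getLockedSynchronizers().length);
        }
    }
}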
// To be called exactly twice by the child process
public static void rendezvousChild() {
    try {
        for (int i = 0; i < 100; i++) {
            System.gc();
            System.runFinalization();
            Thread.sleep(50);
        }
        System.out.write((byte) '\n');
        System.out.flush();
        System.in.read();
    } catch (Throwable t) {
        throw new Error(t);
    }
}
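// Hedged sketch of the parent's side of the rendezvous above (a hypothetical
// helper, not shown in the original test): the child writes '\n' to signal it
// has quiesced and then blocks on System.in.read(); the parent waits for that
// byte, measures the child, then writes a byte back to let it resume.
static void rendezvousParent(Process child) throws java.io.IOException {
    child.getInputStream().read();    // wait for the child's newline: it has quiesced
    // ... inspect the quiesced child here (e.g. with jmap) ...
    child.getOutputStream().write('\n');
    child.getOutputStream().flush();  // unblocks the child's System.in.read()
}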
final void test() throws Exception {
    Future[] futures = new Future[nthreads];
    for (int i = 0; i < nthreads; ++i)
        futures[i] = pool.submit(this);
    barrier.await();
    Thread.sleep(TIMEOUT);
    boolean tooLate = false;
    for (int i = 1; i < nthreads; ++i) {
        if (!futures[i].cancel(true))
            tooLate = true;
        // Unbunch some of the cancels
        if ((i & 3) == 0)
            Thread.sleep(1 + rng.next() % 10);
    }
    // Block until the uncancelled task finishes; get() also propagates any failure
    Object f0 = futures[0].get();
    if (!tooLate) {
        for (int i = 1; i < nthreads; ++i) {
            if (!futures[i].isDone() || !futures[i].isCancelled())
                throw new Error("Only one thread should complete");
        }
    } else
        System.out.print("(cancelled too late) ");
    long endTime = System.nanoTime();
    long time = endTime - timer.startTime;
    if (print) {
        double secs = (double) time / 1000000000.0;
        System.out.println("\t " + secs + "s run time");
    }
}
protected boolean interpolatePath() {
    long timeNow = System.currentTimeMillis();
    PathLocAndDir locAndDir = pathState.interpolatePath(timeNow);
    long oid = obj.getOid();
    // if (locAndDir != null) {
    //     if (Log.loggingDebug) {
    //         Log.debug("BaseBehavior.interpolatePath: oid = " + oid + "; loc = "
    //             + locAndDir.getLoc() + "; dir = " + locAndDir.getDir());
    //     }
    // } else {
    //     if (Log.loggingDebug)
    //         Log.debug("BaseBehavior.interpolatePath: oid = " + oid + "; locAndDir is null");
    // }
    if (locAndDir == null) {
        // We have arrived -- turn off interpolation, and cancel that path
        interpolatingPath = false;
        if (Log.loggingDebug)
            Log.debug("BaseBehavior.interpolatePath: cancelling path: oid = " + oid
                + "; myLoc = " + obj.getWorldNode().getLoc());
        cancelPathInterpolator(oid);
        obj.getWorldNode().setDir(new MVVector(0, 0, 0));
    } else {
        obj.getWorldNode().setPathInterpolatorValues(timeNow, locAndDir.getDir(),
            locAndDir.getLoc(), locAndDir.getOrientation());
        MobManagerPlugin.getTracker(obj.getInstanceOid()).updateEntity(obj);
    }
    return interpolatingPath;
}
/**
 * Spins/blocks until node s is matched by a fulfill operation.
 *
 * @param s the waiting node
 * @param timed true if timed wait
 * @param nanos timeout value
 * @return matched node, or s if cancelled
 */
SNode awaitFulfill(SNode s, boolean timed, long nanos) {
    /*
     * When a node/thread is about to block, it sets its waiter
     * field and then rechecks state at least one more time
     * before actually parking, thus covering race vs
     * fulfiller noticing that waiter is non-null so should be
     * woken.
     *
     * When invoked by nodes that appear at the point of call
     * to be at the head of the stack, calls to park are
     * preceded by spins to avoid blocking when producers and
     * consumers are arriving very close in time. This can
     * happen enough to bother only on multiprocessors.
     *
     * The order of checks for returning out of main loop
     * reflects fact that interrupts have precedence over
     * normal returns, which have precedence over
     * timeouts. (So, on timeout, one last check for match is
     * done before giving up.) Except that calls from untimed
     * SynchronousQueue.{poll/offer} don't check interrupts
     * and don't wait at all, so are trapped in transfer
     * method rather than calling awaitFulfill.
     */
    long lastTime = timed ? System.nanoTime() : 0;
    Thread w = Thread.currentThread();
    SNode h = head;
    int spins = (shouldSpin(s) ? (timed ? maxTimedSpins : maxUntimedSpins) : 0);
    for (;;) {
        if (w.isInterrupted())
            s.tryCancel();
        SNode m = s.match;
        if (m != null)
            return m;
        if (timed) {
            long now = System.nanoTime();
            nanos -= now - lastTime;
            lastTime = now;
            if (nanos <= 0) {
                s.tryCancel();
                continue;
            }
        }
        if (spins > 0)
            spins = shouldSpin(s) ? (spins - 1) : 0;
        else if (s.waiter == null)
            s.waiter = w; // establish waiter so can park next iter
        else if (!timed)
            LockSupport.park(this);
        else if (nanos > spinForTimeoutThreshold)
            LockSupport.parkNanos(this, nanos);
    }
}
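// Minimal sketch of the spin-then-park idiom the comment above describes
// (SpinThenParkCell and all its names are illustrative, not part of the
// original class; values are assumed non-null). A consumer spins briefly,
// records itself as the waiter, rechecks, and only then parks; a producer
// stores the value first and then unparks the recorded waiter, which covers
// the race the comment mentions.
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

class SpinThenParkCell<V> {
    private final AtomicReference<V> value = new AtomicReference<>();
    private volatile Thread waiter;

    V await(int maxSpins) {
        int spins = maxSpins;
        for (;;) {
            V v = value.get();          // recheck state at the top of every iteration
            if (v != null)
                return v;
            if (spins > 0)
                --spins;                // spin while a match may be imminent
            else if (waiter == null)
                waiter = Thread.currentThread(); // establish waiter so we can park next iter
            else
                LockSupport.park(this); // block until the producer unparks us
        }
    }

    void fulfill(V v) {
        value.set(v);                   // publish the value first
        Thread w = waiter;              // then wake the waiter, if one is recorded
        if (w != null)
            LockSupport.unpark(w);
    }
}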
/** Initializes a random variable */
private void createRandom() {
    this.random = new Random(System.currentTimeMillis()
        + Thread.currentThread().getId()
        - Thread.currentThread().hashCode()
        + this.cname.hashCode());
}
public static void main(String[] args) throws Throwable {
    final ReentrantLock lock = new ReentrantLock();
    lock.lock();

    final ReentrantReadWriteLock rwlock = new ReentrantReadWriteLock();
    final ReentrantReadWriteLock.ReadLock readLock = rwlock.readLock();
    final ReentrantReadWriteLock.WriteLock writeLock = rwlock.writeLock();
    rwlock.writeLock().lock();

    final BlockingQueue<Object> q = new LinkedBlockingQueue<Object>();
    final Semaphore fairSem = new Semaphore(0, true);
    final Semaphore unfairSem = new Semaphore(0, false);

    // final int threads =
    //     rnd.nextInt(Runtime.getRuntime().availableProcessors() + 1) + 1;
    final int threads = 3;

    // On Linux, this test runs very slowly for some reason,
    // so use a smaller number of iterations.
    // Solaris can handle 1 << 18.
    // On the other hand, jmap is much slower on Solaris...
    final int iterations = 1 << 8;

    final CyclicBarrier cb = new CyclicBarrier(threads + 1);

    for (int i = 0; i < threads; i++)
        new Thread() {
            public void run() {
                try {
                    final Random rnd = new Random();
                    for (int j = 0; j < iterations; j++) {
                        if (j == iterations / 10 || j == iterations - 1) {
                            cb.await(); // Quiesce
                            cb.await(); // Resume
                        }
                        // int t = rnd.nextInt(2000);
                        int t = rnd.nextInt(900);
                        check(!lock.tryLock(t, NANOSECONDS));
                        check(!readLock.tryLock(t, NANOSECONDS));
                        check(!writeLock.tryLock(t, NANOSECONDS));
                        equal(null, q.poll(t, NANOSECONDS));
                        check(!fairSem.tryAcquire(t, NANOSECONDS));
                        check(!unfairSem.tryAcquire(t, NANOSECONDS));
                    }
                } catch (Throwable t) {
                    unexpected(t);
                }
            }
        }.start();

    cb.await();        // Quiesce
    rendezvousChild(); // Measure
    cb.await();        // Resume

    cb.await();        // Quiesce
    rendezvousChild(); // Measure
    cb.await();        // Resume

    System.exit(failed);
}
private boolean discardAvailableConnections(long timeout, TimeUnit unit)
        throws InterruptedException {
    long start = System.nanoTime();
    boolean success = true;
    for (Connection connection : connections) {
        success &= connection.close(timeout - Cluster.timeSince(start, unit), unit);
        open.decrementAndGet();
    }
    return success;
}
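// Hedged sketch of the elapsed-time helper relied on above and in
// waitForConnection below. Cluster.timeSince is not shown in this excerpt;
// this is one plausible implementation consistent with how it is called:
// report how much of the timeout budget has been consumed since startNanos,
// expressed in the caller's unit so it can be subtracted directly.
static long timeSince(long startNanos, TimeUnit unit) {
    return unit.convert(System.nanoTime() - startNanos, TimeUnit.NANOSECONDS);
}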
public void saveManager() throws FileNotFoundException, IOException {
    String workingDir = System.getProperty("user.dir");
    // try-with-resources closes both streams even if writeObject fails;
    // File.separator keeps the path portable instead of hard-coding "\\"
    try (FileOutputStream fos = new FileOutputStream(workingDir + File.separator + "Manager.ser");
         ObjectOutputStream oos = new ObjectOutputStream(fos)) {
        oos.writeObject(this);
    } catch (IOException e) {
        System.out.println("\nerror saving file!\n");
    }
}
protected static Object[] extractMockArguments(Object[] args) {
    int i = 7; // the leading 7 entries are fixed arguments; mock arguments follow
    if (args.length > i) {
        Object[] mockArgs = new Object[args.length - i];
        System.arraycopy(args, i, mockArgs, 0, mockArgs.length);
        return mockArgs;
    }
    return EMPTY_ARGS;
}
public Manager loadManager() throws IOException, ClassNotFoundException {
    String workingDir = System.getProperty("user.dir");
    // try-with-resources closes the streams even if readObject fails;
    // File.separator keeps the path portable instead of hard-coding "\\"
    try (FileInputStream fin = new FileInputStream(workingDir + File.separator + "Manager.ser");
         ObjectInputStream oin = new ObjectInputStream(fin)) {
        Manager m = (Manager) oin.readObject();
        return (m != null) ? m : new Manager(new Warehouse());
    } catch (IOException e) {
        System.out.println("\nerror loading file!\n");
        return new Manager(new Warehouse());
    }
}
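// Hedged usage sketch for the two methods above (Manager and Warehouse are the
// application's own types, so this compiles only alongside them): persist the
// manager and read it back, falling back to a fresh Manager on failure.
public static void main(String[] args) throws Exception {
    Manager manager = new Manager(new Warehouse());
    manager.saveManager();                    // writes Manager.ser into user.dir
    Manager restored = manager.loadManager(); // reads it back, or returns a fresh Manager
    System.out.println("restored: " + restored);
}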
@Override
public void close() throws BlockStoreException {
    try {
        buffer.force();
        if (System.getProperty("os.name").toLowerCase().contains("win")) {
            log.info("Windows mmap hack: Forcing buffer cleaning");
            WindowsMMapHack.forceRelease(buffer);
        }
        buffer = null; // Allow it to be GCd and the underlying file mapping to go away.
        randomAccessFile.close();
    } catch (IOException e) {
        throw new BlockStoreException(e);
    }
}
static void realMain(String[] args) throws Throwable {
    // jmap doesn't work on Windows
    if (System.getProperty("os.name").startsWith("Windows"))
        return;

    final String childClassName = Job.class.getName();
    final String classToCheckForLeaks = Job.classToCheckForLeaks();
    final String uniqueID = String.valueOf(new Random().nextInt(Integer.MAX_VALUE));

    final String[] jobCmd = {
        java, "-Xmx8m", "-classpath", System.getProperty("test.classes", "."),
        childClassName, uniqueID
    };
    final Process p = new ProcessBuilder(jobCmd).start();

    final String childPid = match(
        commandOutputOf(jps, "-m"),
        "(?m)^ *([0-9]+) +\\Q" + childClassName + "\\E *" + uniqueID + "$",
        1);

    final int n0 = objectsInUse(p, childPid, classToCheckForLeaks);
    final int n1 = objectsInUse(p, childPid, classToCheckForLeaks);
    equal(p.waitFor(), 0);
    equal(p.exitValue(), 0);
    failed += p.exitValue();

    // Check that no objects were leaked.
    System.out.printf("%d -> %d%n", n0, n1);
    check(Math.abs(n1 - n0) < 2); // Almost always n0 == n1
    check(n1 < 20);
    drainers.shutdown();
}
private void generateCNAME() {
    String hostname;
    if (this.mcSession) {
        hostname = this.rtpMCSock.getLocalAddress().getCanonicalHostName();
    } else {
        hostname = this.rtpSock.getLocalAddress().getCanonicalHostName();
    }
    // if (hostname.equals("0.0.0.0") && System.getenv("HOSTNAME") != null) {
    //     hostname = System.getenv("HOSTNAME");
    // }
    cname = System.getProperty("user.name") + "@" + hostname;
}
/**
 * Spins/blocks until node s is fulfilled.
 *
 * @param s the waiting node
 * @param e the comparison value for checking match
 * @param timed true if timed wait
 * @param nanos timeout value
 * @return matched item, or s if cancelled
 */
Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
    /* Same idea as TransferStack.awaitFulfill */
    long lastTime = timed ? System.nanoTime() : 0;
    Thread w = Thread.currentThread();
    int spins = ((head.next == s)
        ? (timed ? maxTimedSpins : maxUntimedSpins)
        : 0);
    for (;;) {
        if (w.isInterrupted())
            s.tryCancel(e);
        Object x = s.item;
        if (x != e)
            return x;
        if (timed) {
            long now = System.nanoTime();
            nanos -= now - lastTime;
            lastTime = now;
            if (nanos <= 0) {
                s.tryCancel(e);
                continue;
            }
        }
        if (spins > 0)
            --spins;
        else if (s.waiter == null)
            s.waiter = w;
        else if (!timed)
            LockSupport.park(this);
        else if (nanos > spinForTimeoutThreshold)
            LockSupport.parkNanos(this, nanos);
    }
}
@Override
public void write(int ireg, byte[] data, boolean waitForCompletion) {
    try {
        synchronized (this.concurrentClientLock) {
            synchronized (this.callbackLock) {
                // Wait until we can write to the write cache
                while (this.writeCacheStatus != WRITE_CACHE_STATUS.IDLE) {
                    this.callbackLock.wait();
                }

                // Indicate where we want to write
                this.iregWriteFirst = ireg;
                this.cregWrite = data.length;

                // Indicate we are dirty so the callback will write us out
                this.writeCacheStatus = WRITE_CACHE_STATUS.DIRTY;

                // Provide the data we want to write
                this.writeCacheLock.lockInterruptibly();
                try {
                    System.arraycopy(data, 0, this.writeCache, dibCacheOverhead, data.length);
                } finally {
                    this.writeCacheLock.unlock();
                }

                // Let the callback know we've got new data for him
                this.callback.onNewDataToWrite();

                if (waitForCompletion) {
                    // Wait until the write at least issues to the device controller. This will
                    // help make any delays/sleeps that follow a write() be more deterministically
                    // relative to the actual I2C device write.
                    while (writeCacheStatus != WRITE_CACHE_STATUS.IDLE) {
                        this.callbackLock.wait();
                    }
                }
            }
        }
    } catch (InterruptedException e) {
        Util.handleCapturedInterrupt(e);
    }
}
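// Minimal sketch of the guarded-wait idiom write() uses above (WriteGate is an
// illustrative stand-in, not part of the original class): waiters loop on the
// condition inside synchronized, and every state change is followed by
// notifyAll() on the same monitor, exactly as callbackLock is used above.
class WriteGate {
    private final Object lock = new Object();
    private boolean busy;

    void beginWrite() throws InterruptedException {
        synchronized (lock) {
            while (busy)       // loop guards against spurious wakeups
                lock.wait();
            busy = true;
        }
    }

    void writeCompleted() {
        synchronized (lock) {
            busy = false;
            lock.notifyAll();  // wake every waiter so each re-tests the guard
        }
    }
}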
protected long setupPathInterpolator(long oid, Point myLoc, Point dest, boolean follow,
        boolean followsTerrain) {
    long timeNow = System.currentTimeMillis();
    WorldManagerClient.MobPathReqMessage reqMsg =
        pathState.setupPathInterpolator(timeNow, myLoc, dest, mobSpeed, follow, followsTerrain);
    if (reqMsg != null) {
        try {
            Engine.getAgent().sendBroadcast(reqMsg);
            if (Log.loggingDebug)
                Log.debug("BaseBehavior.setupPathInterpolator: send MobPathReqMessage " + reqMsg);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        interpolatingPath = true;
        return pathState.pathTimeRemaining();
    } else {
        interpolatingPath = false;
        return 0;
    }
}
public static void startRDPServer() {
    if (rdpServerStarted)
        return;
    rdpServerStarted = true;

    rdpServerThread = new Thread(rdpServer, "RDPServer");
    retryThread = new Thread(new RetryThread(), "RDPRetry");
    packetCallbackThread = new Thread(new PacketCallbackThread(), "RDPCallback");

    if (Log.loggingNet)
        Log.net("static - starting rdpserver thread");

    try {
        selector = Selector.open();
    } catch (Exception e) {
        Log.exception("RDPServer caught exception opening selector", e);
        System.exit(1);
    }

    rdpServerThread.setPriority(rdpServerThread.getPriority() + 2);
    if (Log.loggingDebug)
        Log.debug("RDPServer: starting rdpServerThread with priority "
            + rdpServerThread.getPriority());

    rdpServerThread.start();
    retryThread.start();
    packetCallbackThread.start();
}
private Connection waitForConnection(long timeout, TimeUnit unit)
        throws ConnectionException, TimeoutException {
    long start = System.nanoTime();
    long remaining = timeout;
    do {
        try {
            awaitAvailableConnection(remaining, unit);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            // If we're interrupted, fine: check if there is a connection available,
            // but stop waiting otherwise
            timeout = 0; // this will make us stop the loop if we don't get a connection right away
        }

        if (isShutdown())
            throw new ConnectionException(host.getAddress(), "Pool is shutdown");

        int minInFlight = Integer.MAX_VALUE;
        Connection leastBusy = null;
        for (Connection connection : connections) {
            int inFlight = connection.inFlight.get();
            if (inFlight < minInFlight) {
                minInFlight = inFlight;
                leastBusy = connection;
            }
        }

        // Guard against an empty connection list, which would leave leastBusy null
        if (leastBusy != null) {
            while (true) {
                int inFlight = leastBusy.inFlight.get();
                if (inFlight >= Connection.MAX_STREAM_PER_CONNECTION)
                    break;
                if (leastBusy.inFlight.compareAndSet(inFlight, inFlight + 1))
                    return leastBusy;
            }
        }

        remaining = timeout - Cluster.timeSince(start, unit);
    } while (remaining > 0);
    throw new TimeoutException();
}
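// Hedged sketch of the compareAndSet claim loop above (StreamSlots is an
// illustrative stand-in): atomically take one in-flight slot only while the
// count is below the limit, retrying whenever another thread wins the race.
final class StreamSlots {
    private final java.util.concurrent.atomic.AtomicInteger inFlight =
        new java.util.concurrent.atomic.AtomicInteger();
    private final int max;

    StreamSlots(int max) { this.max = max; }

    boolean tryClaim() {
        for (;;) {
            int current = inFlight.get();
            if (current >= max)
                return false; // saturated; caller should look elsewhere or time out
            if (inFlight.compareAndSet(current, current + 1))
                return true;  // slot claimed atomically
        }
    }
}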
static String javahome() {
    String jh = System.getProperty("java.home");
    // Strip a trailing "jre" plus its leading separator ("/jre" is 4 chars) to get the JDK home
    return jh.endsWith("jre") ? jh.substring(0, jh.length() - 4) : jh;
}
public void run() {
    // every 250 ms, go through all the packets that haven't been ack'd
    List<RDPConnection> conList = new LinkedList<RDPConnection>();
    long lastCounterTime = System.currentTimeMillis();
    while (true) {
        try {
            long startTime = System.currentTimeMillis();
            long interval = startTime - lastCounterTime;
            if (interval > 1000) {
                if (Log.loggingNet) {
                    Log.net("RDPServer counters: activeChannelCalls " + activeChannelCalls
                        + ", selectCalls " + selectCalls
                        + ", transmits " + transmits
                        + ", retransmits " + retransmits
                        + " in " + interval + "ms");
                }
                activeChannelCalls = 0;
                selectCalls = 0;
                transmits = 0;
                retransmits = 0;
                lastCounterTime = startTime;
            }
            if (Log.loggingNet)
                Log.net("RDPServer.RETRY: startTime=" + startTime);

            // go through all the rdpconnections and re-send any unacked packets
            conList.clear();
            lock.lock();
            try {
                // make a copy since the values() collection is backed by the map
                Set<RDPConnection> conCol = RDPServer.getAllConnections();
                if (conCol == null) {
                    throw new MVRuntimeException("values() returned null");
                }
                conList.addAll(conCol); // make non map backed copy
            } finally {
                lock.unlock();
            }

            Iterator<RDPConnection> iter = conList.iterator();
            while (iter.hasNext()) {
                RDPConnection con = iter.next();
                long currentTime = System.currentTimeMillis();

                // is the connection in CLOSE_WAIT
                if (con.getState() == RDPConnection.CLOSE_WAIT) {
                    long closeTime = con.getCloseWaitTimer();
                    long elapsedTime = currentTime - closeTime;
                    Log.net("RDPRetryThread: con is in CLOSE_WAIT: elapsed close timer(ms)="
                        + elapsedTime + ", waiting for 30 seconds to elapse. con=" + con);
                    if (elapsedTime > 30000) {
                        // close the connection
                        Log.net("RDPRetryThread: removing CLOSE_WAIT connection. con=" + con);
                        removeConnection(con);
                    } else {
                        Log.net("RDPRetryThread: time left on CLOSE_WAIT timer: "
                            + (30000 - (currentTime - closeTime)));
                    }
                    // con.close();
                    continue;
                }
                if (Log.loggingNet)
                    Log.net("RDPServer.RETRY: resending expired packets " + con
                        + " - current list size = " + con.unackListSize());

                // see if we should send a null packet, but only if con is already open
                if ((con.getState() == RDPConnection.OPEN)
                        && ((currentTime - con.getLastNullPacketTime()) > 30000)) {
                    con.getLock().lock();
                    try {
                        RDPPacket nulPacket = RDPPacket.makeNulPacket();
                        con.sendPacketImmediate(nulPacket, false);
                        con.setLastNullPacketTime();
                        if (Log.loggingNet)
                            Log.net("RDPServer.retry: sent nul packet: " + nulPacket);
                    } finally {
                        con.getLock().unlock();
                    }
                } else {
                    if (Log.loggingNet)
                        Log.net("RDPServer.retry: sending nul packet in "
                            + (30000 - (currentTime - con.getLastNullPacketTime())));
                }

                con.resend(currentTime - resendTimerMS,   // resend cutoff time
                    currentTime - resendTimeoutMS);       // giveup time
            }

            long endTime = System.currentTimeMillis();
            if (Log.loggingNet)
                Log.net("RDPServer.RETRY: endTime=" + endTime
                    + ", elapse(ms)=" + (endTime - startTime));
            Thread.sleep(250);
        } catch (Exception e) {
            Log.exception("RDPServer.RetryThread.run caught exception", e);
        }
    }
}
void updateStateMachines(UPDATE_STATE_MACHINE caller) {
    // We've got quite the little state machine here!
    synchronized (callbackLock) {
        //----------------------------------------------------------------------------------
        // If we're calling from other than the callback (in which we *know* the port is
        // ready), we need to check whether things are currently busy. We defer until
        // later if they are.
        if (caller == UPDATE_STATE_MACHINE.FROM_USER_WRITE) {
            if (!i2cDevice.isI2cPortReady() || callbackThread == null)
                return;

            // Optimized calling from user mode is not yet implemented
            return;
        }

        //----------------------------------------------------------------------------------
        // Some ancillary bookkeeping

        if (caller == UPDATE_STATE_MACHINE.FROM_CALLBACK) {
            // Capture the current callback thread if we haven't already
            if (callbackThread == null) {
                callbackThread = Thread.currentThread();
                callbackThreadOriginalPriority = callbackThread.getPriority();
            } else
                assertTrue(!BuildConfig.DEBUG
                    || callbackThread.getId() == Thread.currentThread().getId());

            // Set the thread name to make the system more debuggable
            if (0 == hardwareCycleCount)
                Thread.currentThread().setName(String.format("RWLoop(%s)", i2cDevice.getDeviceName()));

            // Adjust the target thread priority. Note that we only ever adjust it upwards,
            // not downwards, because in reality the thread is shared by other I2C objects
            // on the same controller and we don't want to fight with their understanding
            // of what the priority should be.
            int targetPriority = callbackThreadOriginalPriority + callbackThreadPriorityBoost;
            if (callbackThread.getPriority() < targetPriority) {
                try {
                    callbackThread.setPriority(targetPriority);
                } catch (Exception e) {
                    /* ignore: just run as is */
                }
            }

            // Update cycle statistics
            hardwareCycleCount++;
        }

        //----------------------------------------------------------------------------------
        // Initialize state for managing state transition

        setActionFlag = false;
        queueFullWrite = false;
        queueRead = false;
        heartbeatRequired = (msHeartbeatInterval > 0
            && milliseconds(timeSinceLastHeartbeat) >= msHeartbeatInterval);
        enabledReadMode = false;
        enabledWriteMode = false;

        prevReadCacheStatus = readCacheStatus;
        prevWriteCacheStatus = writeCacheStatus;
        prevModeCacheStatus = modeCacheStatus;

        //----------------------------------------------------------------------------------
        // Handle the state machine

        if (caller == UPDATE_STATE_MACHINE.FROM_CALLBACK) {
            //--------------------------------------------------------------------------
            // Deal with the fact that we've completed any previous queueing operation

            if (modeCacheStatus == MODE_CACHE_STATUS.QUEUED)
                modeCacheStatus = MODE_CACHE_STATUS.IDLE;

            if (readCacheStatus == READ_CACHE_STATUS.QUEUED
                    || readCacheStatus == READ_CACHE_STATUS.VALID_QUEUED) {
                readCacheStatus = READ_CACHE_STATUS.QUEUE_COMPLETED;
                nanoTimeReadCacheValid = System.nanoTime();
            }

            if (writeCacheStatus == WRITE_CACHE_STATUS.QUEUED) {
                writeCacheStatus = WRITE_CACHE_STATUS.IDLE;
                // Our write mode status should have been reported back to us
                assertTrue(!BuildConfig.DEBUG || i2cDevice.isI2cPortInWriteMode());
            }

            //--------------------------------------------------------------------------
            // That limits the number of states the caches can now be in

            assertTrue(!BuildConfig.DEBUG
                || (readCacheStatus == READ_CACHE_STATUS.IDLE
                    || readCacheStatus == READ_CACHE_STATUS.SWITCHINGTOREADMODE
                    || readCacheStatus == READ_CACHE_STATUS.VALID_ONLYONCE
                    || readCacheStatus == READ_CACHE_STATUS.QUEUE_COMPLETED));
            assertTrue(!BuildConfig.DEBUG
                || (writeCacheStatus == WRITE_CACHE_STATUS.IDLE
                    || writeCacheStatus == WRITE_CACHE_STATUS.DIRTY));

            //--------------------------------------------------------------------------
            // Complete any read mode switch if there is one

            if (readCacheStatus == READ_CACHE_STATUS.SWITCHINGTOREADMODE) {
                // We're trying to switch into read mode. Are we there yet?
                if (i2cDevice.isI2cPortInReadMode()) {
                    // See also below XYZZY
                    readCacheStatus = READ_CACHE_STATUS.QUEUED;
                    setActionFlag = true;   // actually do an I2C read
                    queueRead = true;       // read the I2C read results
                } else {
                    queueRead = true;       // read the mode byte
                }
            }

            //--------------------------------------------------------------------------
            // If there's a write request pending, and it's ok to issue the write, do so

            else if (writeCacheStatus == WRITE_CACHE_STATUS.DIRTY) {
                issueWrite();

                // Our ordering rules are that any reads after a write have to wait until
                // the write is actually sent to the hardware, so anything we've read
                // before is junk. Note that there's an analogous check in read().
                readCacheStatus = READ_CACHE_STATUS.IDLE;
            }

            //--------------------------------------------------------------------------
            // Initiate reading if we should. Be sure to honor the policy of the read mode.

            else if (readCacheStatus == READ_CACHE_STATUS.IDLE || readWindowChanged) {
                if (readWindow != null && readWindow.isOkToRead()) {
                    // We're going to read from this window. If it's an only-once, then
                    // ensure we don't come down this path again with the same ReadWindow instance.
                    readWindow.setReadIssued();

                    // You know...we might *already* have set up the controller to read what
                    // we want. Maybe the previous read was a one-shot, for example.
                    if (readWindowSentToController != null
                            && readWindowSentToController.contains(readWindow)
                            && i2cDevice.isI2cPortInReadMode()) {
                        // Lucky us! We can go ahead and queue the read right now!
                        // See also above XYZZY
                        readWindowActuallyRead = readWindowSentToController;
                        readCacheStatus = READ_CACHE_STATUS.QUEUED;
                        setActionFlag = true;   // actually do an I2C read
                        queueRead = true;       // read the results of the read
                    } else {
                        // We'll start switching now, and queue the read later
                        readWindowActuallyRead = readWindow;
                        startSwitchingToReadMode(readWindow);
                    }
                } else {
                    // There's nothing to read. Make *sure* we are idle.
                    readCacheStatus = READ_CACHE_STATUS.IDLE;
                }
                readWindowChanged = false;
            }

            //--------------------------------------------------------------------------
            // Reissue any previous read if we should. The only way we are here and
            // see READ_CACHE_STATUS.QUEUE_COMPLETED is if we completed a queuing
            // operation above.

            else if (readCacheStatus == READ_CACHE_STATUS.QUEUE_COMPLETED) {
                if (readWindow != null && readWindow.isOkToRead()) {
                    readCacheStatus = READ_CACHE_STATUS.VALID_QUEUED;
                    setActionFlag = true;   // actually do an I2C read
                    queueRead = true;       // read the results of the read
                } else {
                    readCacheStatus = READ_CACHE_STATUS.VALID_ONLYONCE;
                }
            }

            //--------------------------------------------------------------------------
            // Completing the possibilities:

            else if (readCacheStatus == READ_CACHE_STATUS.VALID_ONLYONCE) {
                // Just leave it there until someone reads it
            }

            //----------------------------------------------------------------------------------
            // Ok, after all that we finally know how we're required to interact with
            // the device controller according to what we've been asked to read or write.
            // But what, now, about heartbeats?

            if (!setActionFlag && heartbeatRequired) {
                if (heartbeatAction != null) {
                    if (readWindowSentToController != null && heartbeatAction.rereadLastRead) {
                        // Controller is in or is switching to read mode. If he's there
                        // yet, then issue an I2C read; if he's not, then he soon will be.
                        if (i2cDevice.isI2cPortInReadMode()) {
                            setActionFlag = true;   // issue an I2C read
                        } else {
                            assertTrue(!BuildConfig.DEBUG
                                || readCacheStatus == READ_CACHE_STATUS.SWITCHINGTOREADMODE);
                        }
                    } else if (readWindowSentToControllerInitialized
                            && readWindowSentToController == null
                            && heartbeatAction.rewriteLastWritten) {
                        // Controller is in write mode, and the write cache has what we last wrote
                        queueFullWrite = true;
                        setActionFlag = true;   // issue an I2C write
                    } else if (heartbeatAction.heartbeatReadWindow != null) {
                        // The simplest way to do this is just to do a new read from the outside,
                        // as that means it has literally zero impact here on our state machine.
                        // That unfortunately introduces concurrency where otherwise none might
                        // exist, but that's ONLY if you choose this flavor of heartbeat, so
                        // that's a reasonable tradeoff.
                        final ReadWindow window =
                            heartbeatAction.heartbeatReadWindow; // capture here while we still have the lock
                        Thread thread = new Thread(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    I2cDeviceClient.this.read(window.getIregFirst(), window.getCreg());
                                } catch (Exception e) { // paranoia
                                    // ignored
                                }
                            }
                        });
                        // Start the thread a-going. It will run relatively quickly and then shut down.
                        thread.setName("I2C heartbeat read thread");
                        thread.setPriority(heartbeatAction.explicitReadPriority);
                        thread.start();
                    }
                }
            }

            if (setActionFlag) {
                // We're about to communicate on I2C right now, so reset the heartbeat.
                // Note that we reset() *before* we talk to the device so as to do
                // conservative timing accounting.
                timeSinceLastHeartbeat.reset();
            }
        } else if (caller == UPDATE_STATE_MACHINE.FROM_USER_WRITE) {
            // There's nothing we know to do that would speed things up, so we
            // just do nothing here and wait until the next portIsReady() callback.
        }

        //----------------------------------------------------------------------------------
        // Read, set action flag and / or queue to module as requested

        if (setActionFlag)
            i2cDevice.setI2cPortActionFlag();
        else
            clearActionFlag();

        if (setActionFlag && !queueFullWrite) {
            i2cDevice.writeI2cPortFlagOnlyToController();
        } else if (queueFullWrite) {
            i2cDevice.writeI2cCacheToController();
            // if (modeCacheStatus == MODE_CACHE_STATUS.DIRTY)
            modeCacheStatus = MODE_CACHE_STATUS.QUEUED;
        }

        // Queue a read after queuing any write for a bit of paranoia: if we're mode
        // switching to write, we want that write to go out first, THEN read the mode
        // status. It probably would anyway, but why not...
        if (queueRead) {
            i2cDevice.readI2cCacheFromController();
        }

        //----------------------------------------------------------------------------------
        // Do logging

        if (loggingEnabled) {
            StringBuilder message = new StringBuilder();

            switch (caller) {
                case FROM_CALLBACK:
                    message.append(String.format("cyc %d", hardwareCycleCount));
                    break;
                case FROM_USER_WRITE:
                    message.append(String.format("usr write"));
                    break;
            }
            if (setActionFlag)
                message.append("|flag");
            if (setActionFlag && !queueFullWrite)
                message.append("|f");
            else if (queueFullWrite)
                message.append("|w");
            else
                message.append("|.");
            if (queueRead)
                message.append("|r");
            if (readCacheStatus != prevReadCacheStatus)
                message.append("| R." + prevReadCacheStatus.toString()
                    + "->" + readCacheStatus.toString());
            if (writeCacheStatus != prevWriteCacheStatus)
                message.append("| W." + prevWriteCacheStatus.toString()
                    + "->" + writeCacheStatus.toString());
            // if (modeCacheStatus != prevModeCacheStatus)
            //     message.append("| M." + prevModeCacheStatus.toString()
            //         + "->" + modeCacheStatus.toString());
            if (enabledWriteMode)
                message.append(String.format("| setWrite(0x%02x,%d)", iregWriteFirst, cregWrite));
            if (enabledReadMode)
                message.append(String.format("| setRead(0x%02x,%d)",
                    readWindow.getIregFirst(), readWindow.getCreg()));

            log(Log.DEBUG, message.toString());
        }

        //----------------------------------------------------------------------------------
        // Notify anyone blocked in read() or write()

        callbackLock.notifyAll();
    }
}
/**
 * Send data to all participants registered as receivers, using the current timeStamp and
 * payload type. The RTP timestamp will be the same for all the packets.
 *
 * @param buffers an array of byte buffers; each should not be padded and should be less than
 *     1500 bytes on most networks
 * @param csrcArray an array with the SSRCs of contributing sources
 * @param markers an array indicating which packets should be marked. Rarely anything but the
 *     first one
 * @param rtpTimestamp the RTP timestamp to be applied to all packets
 * @param seqNumbers an array with the sequence number associated with each byte[]
 * @return null if there was a problem sending the packets, otherwise a 2-dim array with
 *     {RTP Timestamp, Sequence number}
 */
public long[][] sendData(
        byte[][] buffers, long[] csrcArray, boolean[] markers, long rtpTimestamp, long[] seqNumbers) {
    logger.debug("-> RTPSession.sendData(byte[])");

    // Same RTP timestamp for all
    if (rtpTimestamp < 0)
        rtpTimestamp = System.currentTimeMillis();

    // Return values
    long[][] ret = new long[buffers.length][2];

    for (int i = 0; i < buffers.length; i++) {
        byte[] buf = buffers[i];

        boolean marker = false;
        if (markers != null)
            marker = markers[i];

        if (buf.length > 1500) {
            System.out.println("RTPSession.sendData() called with buffer exceeding 1500 bytes ("
                + buf.length + ")");
        }

        // Get the return values
        ret[i][0] = rtpTimestamp;
        if (seqNumbers == null) {
            ret[i][1] = getNextSeqNum();
        } else {
            ret[i][1] = seqNumbers[i];
        }

        // Create a new RTP Packet
        RtpPkt pkt = new RtpPkt(rtpTimestamp, this.ssrc, (int) ret[i][1], this.payloadType, buf);
        if (csrcArray != null)
            pkt.setCsrcs(csrcArray);
        pkt.setMarked(marker);

        // Create a raw packet
        byte[] pktBytes = pkt.encode();
        // System.out.println(Integer.toString(StaticProcs.bytesToUIntInt(pktBytes, 2)));

        // Pre-flight check: are we resolving an SSRC conflict?
        if (this.conflict) {
            System.out.println("RTPSession.sendData() called while trying to resolve conflict.");
            return null;
        }

        if (this.mcSession) {
            DatagramPacket packet = null;
            try {
                packet = new DatagramPacket(pktBytes, pktBytes.length, this.mcGroup,
                    this.rtpMCSock.getPort());
            } catch (Exception e) {
                System.out.println("RTPSession.sendData() packet creation failed.");
                e.printStackTrace();
                return null;
            }

            try {
                rtpMCSock.send(packet);
                // Debug
                if (this.debugAppIntf != null) {
                    this.debugAppIntf.packetSent(1, (InetSocketAddress) packet.getSocketAddress(),
                        "Sent multicast RTP packet of size " + packet.getLength()
                            + " to " + packet.getSocketAddress().toString()
                            + " via " + rtpMCSock.getLocalSocketAddress().toString());
                }
            } catch (Exception e) {
                System.out.println("RTPSession.sendData() multicast failed.");
                e.printStackTrace();
                return null;
            }
        } else {
            // Loop over recipients
            Iterator<Participant> iter = partDb.getUnicastReceivers();
            while (iter.hasNext()) {
                InetSocketAddress receiver = iter.next().rtpAddress;
                DatagramPacket packet = null;

                logger.debug("   Sending to {}", receiver);

                try {
                    packet = new DatagramPacket(pktBytes, pktBytes.length, receiver);
                } catch (Exception e) {
                    System.out.println("RTPSession.sendData() packet creation failed.");
                    e.printStackTrace();
                    return null;
                }

                // Actually send the packet
                try {
                    rtpSock.send(packet);
                    // Debug
                    if (this.debugAppIntf != null) {
                        this.debugAppIntf.packetSent(0, (InetSocketAddress) packet.getSocketAddress(),
                            "Sent unicast RTP packet of size " + packet.getLength()
                                + " to " + packet.getSocketAddress().toString()
                                + " via " + rtpSock.getLocalSocketAddress().toString());
                    }
                } catch (Exception e) {
                    System.out.println("RTPSession.sendData() unicast failed.");
                    e.printStackTrace();
                    return null;
                }
            }
        }

        // Update our stats
        this.sentPktCount++;
        this.sentOctetCount += buf.length; // count payload octets (bytes), not packets

        logger.info("<- RTPSession.sendData(byte[]) seqNum={}", pkt.getSeqNumber());
    }

    return ret;
}