/** RaceDet: get recursion count, assuming lock is held by current thread */
@Uninterruptible
@NoNullCheck
@Inline
public static final int getRecursionCountLocked(Object o, Offset lockOffset) {
  if (VM.VerifyAssertions) {
    VM._assert(holdsLock(o, lockOffset, RVMThread.getCurrentThread()));
  }
  Word bits = Magic.getWordAtOffset(o, lockOffset);
  int count;
  if (bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT)) {
    // if locked, then it is locked with a fat lock
    Lock l = Lock.getLock(getLockIndex(bits));
    l.mutex.lock();
    count = l.getRecursionCount();
    l.mutex.unlock();
  } else {
    if (VM.VerifyAssertions) {
      VM._assert(bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE) ||
                 bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN));
    }
    count = getRecCount(bits);
  }
  if (VM.VerifyAssertions) {
    VM._assert(count > 0);
  }
  return count;
}
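// Hypothetical usage sketch (not taken from RaceDet or this file; the helper name is an
// assumption for illustration only): getRecursionCountLocked() is only safe to call while
// the current thread owns the monitor, e.g. from instrumentation running inside the
// synchronized region being analyzed. A count of 1 identifies the outermost acquisition.
private static boolean isOutermostAcquisitionSketch(Object o, Offset lockOffset) {
  return getRecursionCountLocked(o, lockOffset) == 1;
}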
@Uninterruptible
@NoNullCheck
public static boolean holdsLock(Object o, Offset lockOffset, RVMThread thread) {
  for (int cnt = 0; ; ++cnt) {
    int tid = thread.getLockingId();
    Word bits = Magic.getWordAtOffset(o, lockOffset);
    if (bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
      // if locked, then it is locked with a thin lock
      return bits.and(TL_THREAD_ID_MASK).toInt() == tid &&
             !bits.and(TL_LOCK_COUNT_MASK).isZero();
    } else if (bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN)) {
      return bits.and(TL_THREAD_ID_MASK).toInt() == tid;
    } else {
      if (VM.VerifyAssertions) VM._assert(bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT));
      // if locked, then it is locked with a fat lock
      Lock l = Lock.getLock(getLockIndex(bits));
      if (l != null) {
        l.mutex.lock();
        boolean result = (l.getOwnerId() == tid && l.getLockedObject() == o);
        l.mutex.unlock();
        return result;
      }
      // l was null: the fat lock is being freed (deflated); fall through and retry
    }
    RVMThread.yield();
  }
}
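// Illustrative sketch (not part of the original file; the helper name is hypothetical):
// how a wait()/notify()-style caller might use holdsLock() to validate that the calling
// thread owns the monitor before proceeding.
private static void checkOwnershipSketch(Object o, Offset lockOffset) {
  if (!holdsLock(o, lockOffset, RVMThread.getCurrentThread())) {
    RVMThread.raiseIllegalMonitorStateException("current thread does not hold lock", o);
  }
}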
/**
 * Obtains the heavy-weight lock, if there is one, associated with the indicated object.
 * Returns <code>null</code> if there is no heavy-weight lock associated with the object
 * and <code>create</code> is false.
 *
 * @param o the object from which a lock is desired
 * @param lockOffset the offset of the thin lock word in the object.
 * @param create if true, create a heavy lock if none is found
 * @return the heavy-weight lock on the object (if any)
 */
@Unpreemptible
public static Lock getHeavyLock(Object o, Offset lockOffset, boolean create) {
  Word old = Magic.getWordAtOffset(o, lockOffset);
  if (isFat(old)) {
    // a fat lock is already in place
    return Lock.getLock(getLockIndex(old));
  } else if (create) {
    return inflate(o, lockOffset);
  } else {
    return null;
  }
}
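// Illustrative usage sketch (an assumption, not part of the original listing; the helper
// name is hypothetical): callers that need the heavy lock's waiter machinery, such as
// wait/notify paths, force inflation with create=true, while purely diagnostic callers
// pass create=false and must tolerate a null result.
@Unpreemptible
private static Lock requireHeavyLockSketch(Object o, Offset lockOffset) {
  Lock l = getHeavyLock(o, lockOffset, true); // inflates the thin/biased lock if needed
  if (VM.VerifyAssertions) VM._assert(l != null);
  return l;
}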
@NoInline
@NoNullCheck
@Unpreemptible
public static void unlock(Object o, Offset lockOffset) {
  Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
  for (int cnt = 0; ; cnt++) {
    Word old = Magic.getWordAtOffset(o, lockOffset);
    Word stat = old.and(TL_STAT_MASK);
    if (stat.EQ(TL_STAT_BIASABLE)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.EQ(threadId)) {
        if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
          RVMThread.raiseIllegalMonitorStateException(
              "biased unlocking: we own this object but the count is already zero", o);
        }
        // biased: only the bias owner updates the lock word, so a plain store suffices
        setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
        return;
      } else {
        RVMThread.raiseIllegalMonitorStateException(
            "biased unlocking: we don't own this object", o);
      }
    } else if (stat.EQ(TL_STAT_THIN)) {
      Magic.sync();
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.EQ(threadId)) {
        Word changed;
        if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
          // outermost unlock: clear the owner, keep the thin state
          changed = old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
        } else {
          changed = old.minus(TL_LOCK_COUNT_UNIT);
        }
        if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
          return;
        }
      } else {
        if (false) { // debugging output, normally disabled
          VM.sysWriteln("threadId = ", threadId);
          VM.sysWriteln("id = ", id);
        }
        RVMThread.raiseIllegalMonitorStateException(
            "thin unlocking: we don't own this object", o);
      }
    } else {
      if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
      // fat unlock
      Lock.getLock(getLockIndex(old)).unlockHeavy(o);
      return;
    }
  }
}
/**
 * Promotes a light-weight lock to a heavy-weight lock. If this returns the lock that you
 * gave it, its mutex will be locked; otherwise, its mutex will be unlocked. Hence, calls
 * to this method should always be followed by a conditional lock() or unlock() call.
 *
 * @param o the object to get a heavy-weight lock
 * @param lockOffset the offset of the thin lock word in the object.
 * @param l the heavy-weight lock to attempt to install
 * @return the inflated lock; either the one you gave, or another one, if the lock was
 *         inflated by some other thread.
 */
@NoNullCheck
@Unpreemptible
protected static Lock attemptToInflate(Object o, Offset lockOffset, Lock l) {
  if (false) VM.sysWriteln("l = ", Magic.objectAsAddress(l)); // debugging output, normally disabled
  l.mutex.lock();
  for (int cnt = 0; ; ++cnt) {
    Word bits = Magic.getWordAtOffset(o, lockOffset);
    // check to see if another thread has already created a fat lock
    if (isFat(bits)) {
      if (trace) {
        VM.sysWriteln("Thread #", RVMThread.getCurrentThreadSlot(),
                      ": freeing lock ", Magic.objectAsAddress(l),
                      " because we had a double-inflate");
      }
      Lock result = Lock.getLock(getLockIndex(bits));
      if (result == null || result.lockedObject != o) {
        continue; /* this is nasty. this will happen when a lock is deflated. */
      }
      Lock.free(l);
      l.mutex.unlock();
      return result;
    }
    if (VM.VerifyAssertions) VM._assert(l != null);
    if (attemptToMarkInflated(o, lockOffset, bits, l.index, cnt)) {
      l.setLockedObject(o);
      l.setOwnerId(getLockOwner(bits));
      if (l.getOwnerId() != 0) {
        l.setRecursionCount(getRecCount(bits));
      } else {
        if (VM.VerifyAssertions) VM._assert(l.getRecursionCount() == 0);
      }
      return l;
    }
    // contention detected, try again
  }
}
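// Sketch of how a caller honors the contract documented above (a minimal reconstruction;
// it assumes a Lock.allocate() factory for fresh heavy locks, which is not shown in this
// listing, and the helper name is hypothetical): if attemptToInflate() hands back the lock
// we supplied, we installed it and its mutex is still held, so we must unlock (or keep
// using) it; otherwise another thread won the race and our lock was freed and its mutex
// released on our behalf.
@Unpreemptible
private static Lock inflateSketch(Object o, Offset lockOffset) {
  Lock l = Lock.allocate();                     // hypothetical factory for an unused heavy lock
  Lock result = attemptToInflate(o, lockOffset, l);
  if (result == l) {
    l.mutex.unlock();                           // we won: release the mutex handed back to us
  }
  return result;
}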
@NoInline
@NoNullCheck
@Unpreemptible
public static void lock(Object o, Offset lockOffset) {
  if (STATS) fastLocks++;
  Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
  for (int cnt = 0; ; cnt++) {
    Word old = Magic.getWordAtOffset(o, lockOffset);
    Word stat = old.and(TL_STAT_MASK);
    boolean tryToInflate = false;
    if (stat.EQ(TL_STAT_BIASABLE)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.isZero()) {
        if (ENABLE_BIASED_LOCKING) {
          // lock is unbiased, bias it in our favor and grab it
          if (Synchronization.tryCompareAndSwap(
                  o, lockOffset, old, old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
            Magic.isync();
            return;
          }
        } else {
          // lock is unbiased but biasing is NOT allowed, so turn it into a thin lock
          if (Synchronization.tryCompareAndSwap(
                  o, lockOffset, old, old.or(threadId).or(TL_STAT_THIN))) {
            Magic.isync();
            return;
          }
        }
      } else if (id.EQ(threadId)) {
        // lock is biased in our favor
        Word changed = old.plus(TL_LOCK_COUNT_UNIT);
        if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
          setDedicatedU16(o, lockOffset, changed);
          return;
        } else {
          tryToInflate = true;
        }
      } else {
        if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
          continue; // don't spin, since it's thin now
        }
      }
    } else if (stat.EQ(TL_STAT_THIN)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.isZero()) {
        if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId))) {
          Magic.isync();
          return;
        }
      } else if (id.EQ(threadId)) {
        Word changed = old.plus(TL_LOCK_COUNT_UNIT);
        if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
          tryToInflate = true;
        } else if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
          Magic.isync();
          return;
        }
      } else if (cnt > retryLimit) {
        tryToInflate = true;
      }
    } else {
      if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
      // lock is fat. contend on it.
      if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
        return;
      }
    }
    if (tryToInflate) {
      if (STATS) slowLocks++;
      // the lock is not fat, is owned by someone else, or else the count wrapped.
      // attempt to inflate it (this may fail, in which case we'll just harmlessly
      // loop around) and lock it (may also fail, if we get the wrong lock). if it
      // succeeds, we're done.
      // NB: this calls into our attemptToMarkInflated() method, which will do the
      // Right Thing if the lock is biased to someone else.
      if (inflateAndLock(o, lockOffset)) {
        return;
      }
    } else {
      RVMThread.yield();
    }
  }
}
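// Illustrative sketch (hypothetical, not part of this file): the overall shape of a
// monitorenter/monitorexit pair built on the slow paths above. Real call sites obtain
// lockOffset from the object model and typically attempt an inlined fast path before
// falling back to these methods.
private static void synchronizedRegionSketch(Object o, Offset lockOffset) {
  lock(o, lockOffset);      // monitorenter: bias/thin fast paths, inflating on contention
  try {
    // ... critical section ...
  } finally {
    unlock(o, lockOffset);  // monitorexit: may raise IllegalMonitorStateException if unowned
  }
}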