Example #1
 @Uninterruptible
 @NoNullCheck
 public static boolean holdsLock(Object o, Offset lockOffset, RVMThread thread) {
   for (int cnt = 0; ; ++cnt) {
     int tid = thread.getLockingId();
     Word bits = Magic.getWordAtOffset(o, lockOffset);
     if (bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
      // biasable state: held only if the word is biased to this thread and the count is nonzero
       return bits.and(TL_THREAD_ID_MASK).toInt() == tid && !bits.and(TL_LOCK_COUNT_MASK).isZero();
     } else if (bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN)) {
       return bits.and(TL_THREAD_ID_MASK).toInt() == tid;
     } else {
       if (VM.VerifyAssertions) VM._assert(bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT));
       // if locked, then it is locked with a fat lock
       Lock l = Lock.getLock(getLockIndex(bits));
       if (l != null) {
         l.mutex.lock();
         boolean result = (l.getOwnerId() == tid && l.getLockedObject() == o);
         l.mutex.unlock();
         return result;
       }
     }
     RVMThread.yield();
   }
 }
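
holdsLock dispatches on the two status bits of the lock word: a biasable word counts as held only when it is biased to the querying thread with a nonzero count, a thin word is decided by the owner field alone, and a fat word defers to the heavy lock's owner. The following standalone sketch mirrors that dispatch over a plain long; the bit layout (masks and shift) is hypothetical, not the actual RVM encoding.

  // Standalone sketch of tri-state lock-word decoding. The layout below is
  // hypothetical: [63..48 owner id] [47..32 recursion count] [1..0 status].
  public final class LockWordSketch {
    static final long STAT_MASK     = 0b11L;
    static final long STAT_BIASABLE = 0b00L;
    static final long STAT_THIN     = 0b01L;
    static final long STAT_FAT      = 0b10L;
    static final long COUNT_MASK    = 0x0000_FFFF_0000_0000L;
    static final long ID_MASK       = 0xFFFF_0000_0000_0000L;
    static final int  ID_SHIFT      = 48;

    static boolean heldBy(long bits, int tid) {
      long stat = bits & STAT_MASK;
      if (stat == STAT_BIASABLE) {
        // biased: held only if biased to tid AND the count is nonzero
        return (int) ((bits & ID_MASK) >>> ID_SHIFT) == tid && (bits & COUNT_MASK) != 0;
      } else if (stat == STAT_THIN) {
        // thin: the owner field alone decides
        return (int) ((bits & ID_MASK) >>> ID_SHIFT) == tid;
      } else {
        // fat: the real code queries the heavy lock's owner under its mutex
        return false; // placeholder for the Lock.getLock(...) path
      }
    }
  }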
Example #2
 /** RaceDet: get recursion count, assuming lock is held by current thread */
 @Uninterruptible
 @NoNullCheck
 @Inline
 public static final int getRecursionCountLocked(Object o, Offset lockOffset) {
   if (VM.VerifyAssertions) {
     VM._assert(holdsLock(o, lockOffset, RVMThread.getCurrentThread()));
   }
   Word bits = Magic.getWordAtOffset(o, lockOffset);
   int count;
   if (bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT)) {
     // if locked, then it is locked with a fat lock
     Lock l = Lock.getLock(getLockIndex(bits));
     l.mutex.lock();
     count = l.getRecursionCount();
     l.mutex.unlock();
   } else {
     if (VM.VerifyAssertions) {
       VM._assert(
           bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE) || bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN));
     }
     count = getRecCount(bits);
   }
   if (VM.VerifyAssertions) {
     VM._assert(count > 0);
   }
   return count;
 }
Example #3
 /** scan lock queues for thread and report its state */
 @Interruptible
 public static String getThreadState(RVMThread t) {
   for (int i = 0; i < numLocks(); i++) {
     Lock l = getLock(i);
     if (l == null || !l.active) continue;
     if (l.isBlocked(t)) return ("waitingForLock(blocked)" + i);
     if (l.isWaiting(t)) return "waitingForNotification(waiting)";
   }
   return null;
 }
Example #4
 /**
  * Promotes a light-weight lock to a heavy-weight lock and locks it. Note: the object in question
  * will normally be locked by another thread, or it may be unlocked. If there is already a
  * heavy-weight lock on this object, that lock is returned.
  *
  * @param o the object to get a heavy-weight lock
  * @param lockOffset the offset of the thin lock word in the object.
  * @return whether the object was successfully locked
  */
 @Unpreemptible
 private static boolean inflateAndLock(Object o, Offset lockOffset) {
   Lock l = Lock.allocate();
   if (l == null) return false; // can't allocate locks during GC
   Lock rtn = attemptToInflate(o, lockOffset, l);
   if (l != rtn) {
     l = rtn;
     l.mutex.lock();
   }
   return l.lockHeavyLocked(o);
 }
Example #5
 /** Dump the lock table. */
 public static void dumpLocks() {
   for (int i = 0; i < numLocks(); i++) {
     Lock l = getLock(i);
     if (l != null) {
       l.dump();
     }
   }
   VM.sysWrite("\n");
   VM.sysWrite("lock availability stats: ");
   VM.sysWriteInt(globalLocksAllocated);
   VM.sysWrite(" locks allocated, ");
   VM.sysWriteInt(globalLocksFreed);
   VM.sysWrite(" locks freed, ");
   VM.sysWriteInt(globalFreeLocks);
   VM.sysWrite(" free locks\n");
 }
Example #6
 /**
  * Returns the lock index for a given lock word, asserting that the index is in a valid range and
  * that the fat lock bit is set.
  *
  * @param lockWord The lock word whose lock index is being established
  * @return the lock index corresponding to the lock word.
  */
 @Inline
 @Uninterruptible
 public static int getLockIndex(Word lockWord) {
   int index = lockWord.and(TL_LOCK_ID_MASK).rshl(TL_LOCK_ID_SHIFT).toInt();
   if (VM.VerifyAssertions) {
     if (!(index > 0 && index < Lock.numLocks())) {
       VM.sysWrite("Lock index out of range! Word: ");
       VM.sysWrite(lockWord);
       VM.sysWrite(" index: ");
       VM.sysWrite(index);
       VM.sysWrite(" locks: ");
       VM.sysWrite(Lock.numLocks());
       VM.sysWriteln();
     }
     VM._assert(index > 0 && index < Lock.numLocks()); // index is in range
     VM._assert(lockWord.and(TL_STAT_MASK).EQ(TL_STAT_FAT)); // fat lock bit is set
   }
   return index;
 }
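
Word.rshl is a logical (unsigned) right shift, so in plain Java the same field extraction is a mask followed by >>>. A short sketch, with the mask and shift values assumed rather than taken from the RVM:

  // Sketch of bit-field extraction with mask + logical shift; >>> is the Java
  // analogue of Word.rshl. The mask and shift values here are hypothetical.
  static final long LOCK_ID_MASK  = 0x0000_000F_FFFF_F000L; // assumed field position
  static final int  LOCK_ID_SHIFT = 12;

  static int lockIndex(long lockWord) {
    return (int) ((lockWord & LOCK_ID_MASK) >>> LOCK_ID_SHIFT);
  }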
Example #7
 /**
  * Obtains the heavy-weight lock, if there is one, associated with the indicated object. Returns
  * <code>null</code>, if there is no heavy-weight lock associated with the object.
  *
  * @param o the object from which a lock is desired
  * @param lockOffset the offset of the thin lock word in the object.
  * @param create if true, create heavy lock if none found
  * @return the heavy-weight lock on the object (if any)
  */
 @Unpreemptible
 public static Lock getHeavyLock(Object o, Offset lockOffset, boolean create) {
   Word old = Magic.getWordAtOffset(o, lockOffset);
   if (isFat(old)) { // already a fat lock in place
     return Lock.getLock(getLockIndex(old));
   } else if (create) {
     return inflate(o, lockOffset);
   } else {
     return null;
   }
 }
Example #8
 /**
  * Promotes a light-weight lock to a heavy-weight lock. Note: the object in question will normally
  * be locked by another thread, or it may be unlocked. If there is already a heavy-weight lock on
  * this object, that lock is returned.
  *
  * @param o the object to get a heavy-weight lock
  * @param lockOffset the offset of the thin lock word in the object.
  * @return the heavy-weight lock on this object
  */
 @Unpreemptible
 private static Lock inflate(Object o, Offset lockOffset) {
   Lock l = Lock.allocate();
   if (VM.VerifyAssertions) {
     VM._assert(
         l != null); // inflate called by wait (or notify) which shouldn't be called during GC
   }
   Lock rtn = attemptToInflate(o, lockOffset, l);
   if (rtn == l) l.mutex.unlock();
   return rtn;
 }
Example #9
 @NoInline
 @NoNullCheck
 @Unpreemptible
 public static void unlock(Object o, Offset lockOffset) {
   Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
   for (int cnt = 0; ; cnt++) {
     Word old = Magic.getWordAtOffset(o, lockOffset);
     Word stat = old.and(TL_STAT_MASK);
     if (stat.EQ(TL_STAT_BIASABLE)) {
       Word id = old.and(TL_THREAD_ID_MASK);
       if (id.EQ(threadId)) {
         if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
           RVMThread.raiseIllegalMonitorStateException(
               "biased unlocking: we own this object but the count is already zero", o);
         }
         setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
         return;
       } else {
         RVMThread.raiseIllegalMonitorStateException(
             "biased unlocking: we don't own this object", o);
       }
     } else if (stat.EQ(TL_STAT_THIN)) {
       Magic.sync();
       Word id = old.and(TL_THREAD_ID_MASK);
       if (id.EQ(threadId)) {
         Word changed;
         if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
           changed = old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
         } else {
           changed = old.minus(TL_LOCK_COUNT_UNIT);
         }
         if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
           return;
         }
       } else {
         if (false) {
           VM.sysWriteln("threadId = ", threadId);
           VM.sysWriteln("id = ", id);
         }
         RVMThread.raiseIllegalMonitorStateException(
             "thin unlocking: we don't own this object", o);
       }
     } else {
       if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
       // fat unlock
       Lock.getLock(getLockIndex(old)).unlockHeavy(o);
       return;
     }
   }
 }
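
The thin branch of unlock() is a classic read-compute-CAS loop: decrement the count on a nested exit, clear the owner on the last exit, and retry if the CAS loses a race. A condensed standalone version over an AtomicLong word; the thread id is assumed pre-shifted into the owner field (as getLockingId() arranges in the RVM), and the bit layout is hypothetical:

  import java.util.concurrent.atomic.AtomicLong;

  // Standalone sketch of the thin-lock release loop; layout hypothetical.
  final class ThinUnlockSketch {
    static final long ID_MASK    = 0xFFFF_0000_0000_0000L;
    static final long COUNT_MASK = 0x0000_FFFF_0000_0000L;
    static final long COUNT_UNIT = 0x0000_0001_0000_0000L;

    static void unlock(AtomicLong word, long threadId) {
      for (;;) {
        long old = word.get();
        if ((old & ID_MASK) != threadId) {
          throw new IllegalMonitorStateException("thin unlocking: not the owner");
        }
        long changed = (old & COUNT_MASK) == 0
            ? old & ~(ID_MASK | COUNT_MASK)   // last exit: clear owner and count
            : old - COUNT_UNIT;               // nested exit: decrement the count
        if (word.compareAndSet(old, changed)) return;
        // CAS failed: another thread touched the word; re-read and retry
      }
    }
  }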
Example #10
 static void returnLock(Lock l) {
   if (trace) {
     VM.sysWriteln(
         "Lock.returnLock: returning ",
         Magic.objectAsAddress(l),
         " to the global freelist for Thread #",
         RVMThread.getCurrentThreadSlot());
   }
   lockAllocationMutex.lock();
   l.nextFreeLock = globalFreeLock;
   globalFreeLock = l;
   globalFreeLocks++;
   globalLocksFreed++;
   lockAllocationMutex.unlock();
 }
Example #11
 /**
  * Promotes a light-weight lock to a heavy-weight lock. If this returns the lock that you gave it,
  * its mutex will be locked; otherwise, its mutex will be unlocked. Hence, calls to this method
  * should always be followed by a conditional lock() or unlock() call.
  *
  * @param o the object to get a heavy-weight lock
  * @param lockOffset the offset of the thin lock word in the object.
  * @return the inflated lock; either the one you gave, or another one, if the lock was inflated by
  *     some other thread.
  */
 @NoNullCheck
 @Unpreemptible
 protected static Lock attemptToInflate(Object o, Offset lockOffset, Lock l) {
   if (false) VM.sysWriteln("l = ", Magic.objectAsAddress(l));
   l.mutex.lock();
   for (int cnt = 0; ; ++cnt) {
     Word bits = Magic.getWordAtOffset(o, lockOffset);
     // check to see if another thread has already created a fat lock
     if (isFat(bits)) {
       if (trace) {
         VM.sysWriteln(
             "Thread #",
             RVMThread.getCurrentThreadSlot(),
             ": freeing lock ",
             Magic.objectAsAddress(l),
             " because we had a double-inflate");
       }
       Lock result = Lock.getLock(getLockIndex(bits));
       if (result == null || result.lockedObject != o) {
         continue; /* this is nasty.  this will happen when a lock
                   is deflated. */
       }
       Lock.free(l);
       l.mutex.unlock();
       return result;
     }
     if (VM.VerifyAssertions) VM._assert(l != null);
     if (attemptToMarkInflated(o, lockOffset, bits, l.index, cnt)) {
       l.setLockedObject(o);
       l.setOwnerId(getLockOwner(bits));
       if (l.getOwnerId() != 0) {
         l.setRecursionCount(getRecCount(bits));
       } else {
         if (VM.VerifyAssertions) VM._assert(l.getRecursionCount() == 0);
       }
       return l;
     }
     // contention detected, try again
   }
 }
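
The heart of attemptToInflate is the race on publishing a fat-lock index: whoever CASes the word into the fat state wins, and the loser frees its speculative lock and adopts the winner's. A standalone sketch of just that race; FatLock, the table, and the layout are stand-ins, not RVM APIs, and the deflation re-check from the code above is noted but not modeled:

  import java.util.concurrent.atomic.AtomicLong;

  // Standalone sketch of the double-inflate race: install the lock in the
  // table, then publish its index with a CAS. All names are hypothetical.
  final class InflateSketch {
    static final long STAT_MASK = 0b11L, STAT_FAT = 0b10L;
    static final int LOCK_ID_SHIFT = 2;
    static final FatLock[] table = new FatLock[1024];

    static final class FatLock { int index; }

    static FatLock inflate(AtomicLong word, FatLock ours) {
      table[ours.index] = ours;                 // make ours visible before publishing
      for (;;) {
        long bits = word.get();
        if ((bits & STAT_MASK) == STAT_FAT) {
          // lost the race: recycle ours and adopt the winner's lock.
          // (The RVM additionally re-checks for a concurrently deflated lock.)
          table[ours.index] = null;
          return table[(int) (bits >>> LOCK_ID_SHIFT)];
        }
        long fat = ((long) ours.index << LOCK_ID_SHIFT) | STAT_FAT;
        if (word.compareAndSet(bits, fat)) return ours; // we won the race
        // CAS failed: contention on the word; retry
      }
    }
  }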
Example #12
 /**
  * Recycles an unused heavy-weight lock. Locks are deallocated to processor specific lists, so
  * normally no synchronization is required to obtain or release a lock.
  */
 protected static void free(Lock l) {
   l.active = false;
   RVMThread me = RVMThread.getCurrentThread();
   if (me.cachedFreeLock == null) {
     if (trace) {
       VM.sysWriteln(
           "Lock.free: setting ",
           Magic.objectAsAddress(l),
           " as the cached free lock for Thread #",
           me.getThreadSlot());
     }
     me.cachedFreeLock = l;
   } else {
     if (trace) {
       VM.sysWriteln(
           "Lock.free: returning ",
           Magic.objectAsAddress(l),
           " to the global freelist for Thread #",
           me.getThreadSlot());
     }
     returnLock(l);
   }
 }
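
free() and returnLock() together form a two-level free list: a one-slot per-thread cache takes the common case without synchronization, and only the overflow pays for the global mutex. The same shape in a standalone sketch (hypothetical names, not the RVM API):

  // Standalone sketch of the two-level free list behind free()/returnLock().
  final class FreeListSketch {
    static final class Node { Node next; }

    static final ThreadLocal<Node> cachedFree = new ThreadLocal<>();
    static final Object mutex = new Object();
    static Node globalFree;

    static void free(Node n) {
      if (cachedFree.get() == null) {
        cachedFree.set(n);            // fast path: park in the per-thread cache
      } else {
        synchronized (mutex) {        // slow path: push onto the global list
          n.next = globalFree;
          globalFree = n;
        }
      }
    }
  }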
Example #13
  @NoInline
  @NoNullCheck
  @Unpreemptible
  public static void lock(Object o, Offset lockOffset) {
    if (STATS) fastLocks++;

    Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());

    for (int cnt = 0; ; cnt++) {
      Word old = Magic.getWordAtOffset(o, lockOffset);
      Word stat = old.and(TL_STAT_MASK);
      boolean tryToInflate = false;
      if (stat.EQ(TL_STAT_BIASABLE)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (ENABLE_BIASED_LOCKING) {
            // lock is unbiased, bias it in our favor and grab it
            if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
              Magic.isync();
              return;
            }
          } else {
            // lock is unbiased but biasing is NOT allowed, so turn it into
            // a thin lock
            if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId).or(TL_STAT_THIN))) {
              Magic.isync();
              return;
            }
          }
        } else if (id.EQ(threadId)) {
          // lock is biased in our favor
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            setDedicatedU16(o, lockOffset, changed);
            return;
          } else {
            tryToInflate = true;
          }
        } else {
          if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
            continue; // don't spin, since it's thin now
          }
        }
      } else if (stat.EQ(TL_STAT_THIN)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId))) {
            Magic.isync();
            return;
          }
        } else if (id.EQ(threadId)) {
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            tryToInflate = true;
          } else if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
            Magic.isync();
            return;
          }
        } else if (cnt > retryLimit) {
          tryToInflate = true;
        }
      } else {
        if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
        // lock is fat.  contend on it.
        if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
          return;
        }
      }

      if (tryToInflate) {
        if (STATS) slowLocks++;
        // the lock is not fat, is owned by someone else, or else the count wrapped.
        // attempt to inflate it (this may fail, in which case we'll just harmlessly
        // loop around) and lock it (may also fail, if we get the wrong lock).  if it
        // succeeds, we're done.
        // NB: this calls into our attemptToMarkInflated() method, which will do the
        // Right Thing if the lock is biased to someone else.
        if (inflateAndLock(o, lockOffset)) {
          return;
        }
      } else {
        RVMThread.yield();
      }
    }
  }
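
Stripped of biasing and statistics, the acquire loop has three cases: CAS the owner in when the word is unowned, bump the count when we already own it (inflating if the count would wrap into the owner field), and back off when someone else holds it. A condensed standalone sketch, with the layout assumed as in the earlier sketches and inflation stubbed out:

  import java.util.concurrent.atomic.AtomicLong;

  // Condensed standalone sketch of the thin-lock acquire loop in lock();
  // layout hypothetical, inflation stubbed.
  final class ThinLockSketch {
    static final long ID_MASK    = 0xFFFF_0000_0000_0000L;
    static final long COUNT_MASK = 0x0000_FFFF_0000_0000L;
    static final long COUNT_UNIT = 0x0000_0001_0000_0000L;

    static void lock(AtomicLong word, long threadId) {
      for (;;) {
        long old = word.get();
        long id = old & ID_MASK;
        if (id == 0) {
          // unowned: try to claim the word
          if (word.compareAndSet(old, old | threadId)) return;
        } else if (id == threadId) {
          long changed = old + COUNT_UNIT;
          if ((changed & COUNT_MASK) == 0) {
            inflate(word);            // count would wrap into the owner: go heavy
            return;
          } else if (word.compareAndSet(old, changed)) {
            return;                   // nested acquire recorded
          }
        } else {
          Thread.yield();             // owned by another thread: back off, retry
        }
      }
    }

    static void inflate(AtomicLong word) {
      // stub: the real path allocates a heavy lock and publishes its index,
      // as in attemptToInflate above
      throw new UnsupportedOperationException("inflation not modeled here");
    }
  }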
Example #14
  /**
   * Delivers up an unassigned heavy-weight lock. Locks are allocated from processor specific
   * regions or lists, so normally no synchronization is required to obtain a lock.
   *
   * <p>Collector threads cannot use heavy-weight locks.
   *
   * @return a free Lock; or <code>null</code>, if none can be allocated (e.g., during garbage
   *     collection)
   */
  @UnpreemptibleNoWarn("The caller is prepared to lose control when it allocates a lock")
  static Lock allocate() {
    RVMThread me = RVMThread.getCurrentThread();
    if (me.cachedFreeLock != null) {
      Lock l = me.cachedFreeLock;
      me.cachedFreeLock = null;
      if (trace) {
        VM.sysWriteln(
            "Lock.allocate: returning ", Magic.objectAsAddress(l),
            ", a cached free lock from Thread #", me.getThreadSlot());
      }
      return l;
    }

    Lock l = null;
    while (l == null) {
      if (globalFreeLock != null) {
        lockAllocationMutex.lock();
        l = globalFreeLock;
        if (l != null) {
          globalFreeLock = l.nextFreeLock;
          l.nextFreeLock = null;
          l.active = true;
          globalFreeLocks--;
        }
        lockAllocationMutex.unlock();
        if (trace && l != null) {
          VM.sysWriteln(
              "Lock.allocate: returning ", Magic.objectAsAddress(l),
              " from the global freelist for Thread #", me.getThreadSlot());
        }
      } else {
        l = new Lock(); // may cause thread switch (and processor loss)
        lockAllocationMutex.lock();
        if (globalFreeLock == null) {
          // ok, it's still correct for us to be adding a new lock
          if (nextLockIndex >= MAX_LOCKS) {
            VM.sysWriteln("Too many fat locks"); // make MAX_LOCKS bigger? we can keep going??
            VM.sysFail("Exiting VM with fatal error");
          }
          l.index = nextLockIndex++;
          globalLocksAllocated++;
        } else {
          l = null; // someone added to the freelist, try again
        }
        lockAllocationMutex.unlock();
        if (l != null) {
          if (l.index >= numLocks()) {
            /* We need to grow the table */
            growLocks(l.index);
          }
          addLock(l);
          l.active = true;
          /* make sure other processors see lock initialization.
           * Note: Derek and I BELIEVE that an isync is not required in the other processor because the lock is newly allocated - Bowen */
          Magic.sync();
        }
        if (trace && l != null) {
          VM.sysWriteln(
              "Lock.allocate: returning ",
              Magic.objectAsAddress(l),
              ", a freshly allocated lock for Thread #",
              me.getThreadSlot());
        }
      }
    }
    return l;
  }
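
The else branch of allocate() is a check/allocate/re-check pattern: construct the new Lock outside the mutex (construction may block or trigger a thread switch), then re-verify under the mutex that the free list is still empty before adopting it, discarding it otherwise. A standalone sketch of just that shape (hypothetical names, not the RVM API):

  // Standalone sketch of the check/allocate/re-check shape in allocate().
  final class AllocateSketch {
    static final class Node { int index; Node next; }

    static final Object mutex = new Object();
    static Node globalFree;
    static int nextIndex = 1;

    static Node allocate() {
      for (;;) {
        if (globalFree != null) {        // unsynchronized peek, as in the RVM;
          synchronized (mutex) {         // re-checked under the mutex
            Node n = globalFree;
            if (n != null) {
              globalFree = n.next;
              n.next = null;
              return n;
            }
          }
        } else {
          Node n = new Node();           // construct outside the mutex
          synchronized (mutex) {
            if (globalFree == null) {
              n.index = nextIndex++;     // list still empty: adopt the new node
              return n;
            }
            // the list repopulated while we allocated: drop n and retry
          }
        }
      }
    }
  }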