Example #1
 /** RaceDet: gets the recursion count, assuming the lock is held by the current thread */
 @Uninterruptible
 @NoNullCheck
 @Inline
 public static final int getRecursionCountLocked(Object o, Offset lockOffset) {
   if (VM.VerifyAssertions) {
     VM._assert(holdsLock(o, lockOffset, RVMThread.getCurrentThread()));
   }
   Word bits = Magic.getWordAtOffset(o, lockOffset);
   int count;
   if (bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT)) {
     // the lock is held (asserted above); a FAT state means it is a fat lock,
     // so read the recursion count under the fat lock's mutex
     Lock l = Lock.getLock(getLockIndex(bits));
     l.mutex.lock();
     count = l.getRecursionCount();
     l.mutex.unlock();
   } else {
     if (VM.VerifyAssertions) {
       VM._assert(
           bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE) || bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN));
     }
     count = getRecCount(bits);
   }
   if (VM.VerifyAssertions) {
     VM._assert(count > 0);
   }
   return count;
 }
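
The bit tests above pick fields out of the word stored at lockOffset: a two-bit state (BIASABLE/THIN/FAT), a thread id, and a recursion count. Here is a standalone sketch of such a layout on a plain long; the field widths and positions are illustrative assumptions, not the real ThinLockConstants:

  // Hypothetical lock-word model: [ stat:2 | thread id:14 | recursion count:16 ],
  // with the count in the low bits. All positions/widths are assumptions.
  final class LockWordModel {
    static final int ID_SHIFT = 16, STAT_SHIFT = 30;
    static final long COUNT_MASK = 0xFFFFL;            // analogue of TL_LOCK_COUNT_MASK
    static final long STAT_MASK = 0x3L << STAT_SHIFT;  // analogue of TL_STAT_MASK
    static final long STAT_THIN = 0x1L << STAT_SHIFT;  // analogue of TL_STAT_THIN
    static final long STAT_FAT = 0x2L << STAT_SHIFT;   // analogue of TL_STAT_FAT

    static long recCount(long bits) {                  // analogue of getRecCount(bits)
      return bits & COUNT_MASK;
    }

    public static void main(String[] args) {
      long bits = STAT_THIN | (7L << ID_SHIFT) | 3L;      // thin, owned by thread 7, count 3
      System.out.println((bits & STAT_MASK) == STAT_FAT); // false: no fat-lock mutex needed
      System.out.println(recCount(bits));                 // 3
    }
  }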
Example #2
 /**
  * Releases this heavy-weight lock on the indicated object.
  *
  * @param o the object to be unlocked
  */
 @Unpreemptible
 public void unlockHeavy(Object o) {
   boolean deflated = false;
   mutex.lock(); // Note: thread switching is not allowed while mutex is held.
   RVMThread me = RVMThread.getCurrentThread();
   if (ownerId != me.getLockingId()) {
     mutex.unlock(); // thread-switching benign
     raiseIllegalMonitorStateException("heavy unlocking", o);
   }
   recursionCount--;
   if (recursionCount > 0) {
     mutex.unlock(); // thread-switching benign
     return;
   }
   if (STATS) unlockOperations++;
   ownerId = 0;
   RVMThread toAwaken = entering.dequeue();
   if (toAwaken == null && entering.isEmpty() && waiting.isEmpty()) { // heavy lock can be deflated
     // Possible project: decide on a heuristic to control when lock should be deflated
     Offset lockOffset = Magic.getObjectType(o).getThinLockOffset();
     if (!lockOffset.isMax()) { // deflate heavy lock
       deflate(o, lockOffset);
       deflated = true;
     }
   }
   mutex.unlock(); // does a Magic.sync();  (thread-switching benign)
   if (toAwaken != null) {
     toAwaken.monitor().lockedBroadcastNoHandshake();
   }
 }
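
Note the ordering at the end of unlockHeavy(): the successor is chosen while the mutex is held, but it is only woken after mutex.unlock(), so it does not wake up merely to block on the mutex again. A minimal sketch of the same wake-after-release shape with plain Java monitors (all names here are hypothetical, not RVM APIs):

  import java.util.ArrayDeque;

  // Each waiter parks on its own monitor object, mirroring the per-thread
  // monitor() used by RVMThread above.
  final class HeavyLockSketch {
    final Object mutex = new Object();                      // guards the lock state
    final ArrayDeque<Object> entering = new ArrayDeque<>(); // per-waiter monitors
    Thread owner;
    int recursionCount;

    void unlockHeavy() {
      Object toAwaken;
      synchronized (mutex) {
        if (owner != Thread.currentThread()) throw new IllegalMonitorStateException();
        if (--recursionCount > 0) return;   // still held recursively; nothing to wake
        owner = null;
        toAwaken = entering.poll();         // pick the successor under the mutex...
      }
      if (toAwaken != null) {               // ...but signal it after releasing the mutex
        synchronized (toAwaken) { toAwaken.notifyAll(); }
      }
    }
  }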
Example #3
 /**
  * Completes the task of acquiring the heavy lock, assuming that the mutex is already acquired
  * (locked).
  *
  * @param o the object whose lock is to be acquired
  * @return true if the lock was acquired (including a recursive re-acquire); false if the
  *     caller must retry, either because this Lock no longer guards o or because the thread
  *     queued, was awakened, and still needs to acquire the lock
  */
 @Unpreemptible
 public boolean lockHeavyLocked(Object o) {
   if (lockedObject != o) { // lock disappeared before we got here
     mutex.unlock(); // thread switching benign
     return false;
   }
   if (STATS) lockOperations++;
   RVMThread me = RVMThread.getCurrentThread();
   int threadId = me.getLockingId();
   if (ownerId == threadId) {
     recursionCount++;
   } else if (ownerId == 0) {
     ownerId = threadId;
     recursionCount = 1;
   } else {
     entering.enqueue(me);
     mutex.unlock();
     me.monitor().lockNoHandshake();
     while (entering.isQueued(me)) {
       me.monitor().waitWithHandshake(); // this may spuriously return
     }
     me.monitor().unlock();
     return false;
   }
   mutex.unlock(); // thread-switching benign
   return true;
 }
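
The loop around waitWithHandshake() is the standard guarded wait: the call can return spuriously, so the thread only proceeds once it has really been dequeued, and the method then returns false so the caller retries the whole acquisition. The wait side of the previous sketch uses the same idiom (hypothetical names):

  import java.util.ArrayDeque;

  final class GuardedWaitSketch {
    final Object mutex = new Object();
    final ArrayDeque<Object> entering = new ArrayDeque<>();

    // Enqueue, then sleep until an unlocker has dequeued us. The condition is
    // re-checked after every wakeup because Object.wait(), like
    // waitWithHandshake(), may return spuriously.
    void awaitTurn() throws InterruptedException {
      Object me = new Object();             // stand-in for me.monitor()
      synchronized (mutex) { entering.add(me); }
      synchronized (me) {
        while (stillQueued(me)) {
          me.wait();
        }
      }
    }

    private boolean stillQueued(Object m) {
      synchronized (mutex) { return entering.contains(m); }
    }
  }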
Example #4
  @NoInline
  @Unpreemptible
  public static boolean casFromBiased(
      Object o, Offset lockOffset, Word oldLockWord, Word changed, int cnt) {
    RVMThread me = RVMThread.getCurrentThread();
    Word id = oldLockWord.and(TL_THREAD_ID_MASK);
    if (id.isZero()) {
      if (false) VM.sysWriteln("id is zero - easy case.");
      return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
    } else {
      if (false) VM.sysWriteln("id = ", id);
      int slot = id.toInt() >> TL_THREAD_ID_SHIFT;
      if (false) VM.sysWriteln("slot = ", slot);
      RVMThread owner = RVMThread.threadBySlot[slot];
      if (owner == me // I own it, so I can unbias it trivially; this occurs
                      // when we are inflating due to, for example, wait()
          || owner == null) { // the thread that owned it is dead, so it's safe to unbias
        // note that we use a CAS here, but it's only needed in the case
        // that owner==null, since in that case some other thread may also
        // be unbiasing.
        return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
      } else {
        boolean result = false;

        // NB. this may stop a thread other than the one that had the bias,
        // if that thread died and some other thread took its slot.  that's
        // why we do a CAS below.  it's only needed if some other thread
        // had seen the owner be null (which may happen if we came here after
        // a new thread took the slot while someone else came here when the
        // slot was still null).  if it was the case that everyone else had
        // seen a non-null owner, then the pair handshake would serve as
        // sufficient synchronization (the id would identify the set of threads
        // that shared that id's communicationLock).  oddly, that means that
        // this whole thing could be "simplified" to acquire the
        // communicationLock even if the owner was null.  but that would be
        // goofy.
        if (false) VM.sysWriteln("entering pair handshake");
        owner.beginPairHandshake();
        if (false) VM.sysWriteln("done with that");

        result = Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
        owner.endPairHandshake();
        if (false) VM.sysWriteln("that worked.");

        return result;
      }
    }
  }
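
The subtlety in casFromBiased(): the bias owner updates its lock word with plain stores, so a live owner must first be brought to a stop with the pair handshake before the word can be rewritten; the CAS itself only arbitrates the owner == null case, where two revokers can race. A rough model of the protocol (names hypothetical; the real pair handshake stops the peer at a safepoint rather than taking a Java lock):

  import java.util.concurrent.atomic.AtomicLong;
  import java.util.concurrent.locks.ReentrantLock;

  final class BiasRevocationSketch {
    final AtomicLong lockWord = new AtomicLong();

    // In this model the bias owner holds its own pairLock whenever it updates
    // the biased word with plain stores, so a revoker excludes it by taking it.
    static final class OwnerThread {
      final ReentrantLock pairLock = new ReentrantLock();
    }

    boolean casFromBiased(OwnerThread owner, long oldWord, long newWord) {
      if (owner == null) {
        // owner is dead: another revoker may race us, so the CAS picks a winner
        return lockWord.compareAndSet(oldWord, newWord);
      }
      owner.pairLock.lock();                // analogue of owner.beginPairHandshake()
      try {
        return lockWord.compareAndSet(oldWord, newWord);
      } finally {
        owner.pairLock.unlock();            // analogue of owner.endPairHandshake()
      }
    }
  }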
Example #5
 @NoInline
 @NoNullCheck
 @Unpreemptible
 public static void unlock(Object o, Offset lockOffset) {
   Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
   for (int cnt = 0; ; cnt++) {
     Word old = Magic.getWordAtOffset(o, lockOffset);
     Word stat = old.and(TL_STAT_MASK);
     if (stat.EQ(TL_STAT_BIASABLE)) {
       Word id = old.and(TL_THREAD_ID_MASK);
       if (id.EQ(threadId)) {
         if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
           RVMThread.raiseIllegalMonitorStateException(
               "biased unlocking: we own this object but the count is already zero", o);
         }
         setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
         return;
       } else {
         RVMThread.raiseIllegalMonitorStateException(
             "biased unlocking: we don't own this object", o);
       }
     } else if (stat.EQ(TL_STAT_THIN)) {
       Magic.sync();
       Word id = old.and(TL_THREAD_ID_MASK);
       if (id.EQ(threadId)) {
         Word changed;
         if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
           changed = old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
         } else {
           changed = old.minus(TL_LOCK_COUNT_UNIT);
         }
         if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
           return;
         }
       } else {
         if (false) {
           VM.sysWriteln("threadId = ", threadId);
           VM.sysWriteln("id = ", id);
         }
         RVMThread.raiseIllegalMonitorStateException(
             "thin unlocking: we don't own this object", o);
       }
     } else {
       if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
       // fat unlock
       Lock.getLock(getLockIndex(old)).unlockHeavy(o);
       return;
     }
   }
 }
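
Note the asymmetry between the two unlocked-by-count conditions: in the BIASABLE state the count field is the number of outstanding acquisitions, so a zero count means the object is not locked and unlocking raises IllegalMonitorStateException; in the THIN state a zero count means held exactly once, so the zero-count branch releases the lock outright. A compact model of the thin branch on an AtomicLong, reusing the illustrative layout from the sketch after Example #1:

  import java.util.concurrent.atomic.AtomicLong;

  final class ThinUnlockSketch {
    static final int ID_SHIFT = 16, STAT_SHIFT = 30;
    static final long COUNT_MASK = 0xFFFFL, COUNT_UNIT = 1L;
    static final long ID_MASK = 0x3FFFL << ID_SHIFT;
    static final long STAT_MASK = 0x3L << STAT_SHIFT, STAT_THIN = 0x1L << STAT_SHIFT;

    // myId is the caller's id already shifted into the thread-id field
    static void unlock(AtomicLong word, long myId) {
      while (true) {                                   // retry on CAS failure, as above
        long old = word.get();
        if ((old & STAT_MASK) != STAT_THIN) throw new UnsupportedOperationException();
        if ((old & ID_MASK) != myId) throw new IllegalMonitorStateException();
        long changed = (old & COUNT_MASK) == 0
            ? old & ~ID_MASK                           // count 0 == held once: release fully
            : old - COUNT_UNIT;                        // recursive release: just decrement
        if (word.compareAndSet(old, changed)) return;
      }
    }
  }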
Example #6
 @Inline
 @NoNullCheck
 @Unpreemptible
 public static void inlineUnlockHelper(Object o, Offset lockOffset) {
   Word old = Magic.prepareWord(o, lockOffset); // FIXME: bad for PPC?
   Word id = old.and(TL_THREAD_ID_MASK.or(TL_STAT_MASK));
   Word tid = Word.fromIntSignExtend(RVMThread.getCurrentThread().getLockingId());
   if (id.EQ(tid)) {
     if (!old.and(TL_LOCK_COUNT_MASK).isZero()) {
       setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
       return;
     }
   } else if (old.xor(tid).rshl(TL_LOCK_COUNT_SHIFT).EQ(TL_STAT_THIN.rshl(TL_LOCK_COUNT_SHIFT))) {
     Magic.sync();
     if (Magic.attemptWord(o, lockOffset, old, old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN))) {
       return;
     }
   }
   unlock(o, lockOffset);
 }
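
The else-if condition compresses three tests into one comparison: xor-ing with tid zeroes the thread-id field exactly when we are the owner, the right shift drops low-order bits that lie outside the test, and equality with the shifted TL_STAT_THIN then also forces the stat field to THIN and the count field to zero. In other words, the branch fires only for a thin lock that we own with recursion count zero, which is why the CAS can clear it outright. The same trick in the illustrative layout (which has no extra low header bits, so the shift is unnecessary):

  public class XorTrickDemo {
    public static void main(String[] args) {
      final int ID_SHIFT = 16, STAT_SHIFT = 30;
      final long STAT_THIN = 0x1L << STAT_SHIFT;
      long tid = 7L << ID_SHIFT;                        // our locking id
      long held = STAT_THIN | tid;                      // thin, ours, count 0
      long other = STAT_THIN | (9L << ID_SHIFT);        // thin, owned by thread 9
      long nested = STAT_THIN | tid | 2L;               // thin, ours, count 2
      System.out.println((held ^ tid) == STAT_THIN);    // true: fast-path release
      System.out.println((other ^ tid) == STAT_THIN);   // false: id bits survive the xor
      System.out.println((nested ^ tid) == STAT_THIN);  // false: count bits are nonzero
    }
  }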
Example #7
 @Inline
 @NoNullCheck
 @Unpreemptible
 public static void inlineLockHelper(Object o, Offset lockOffset) {
   Word old = Magic.prepareWord(o, lockOffset); // FIXME: bad for PPC?
   Word id = old.and(TL_THREAD_ID_MASK.or(TL_STAT_MASK));
   Word tid = Word.fromIntSignExtend(RVMThread.getCurrentThread().getLockingId());
   if (id.EQ(tid)) {
     Word changed = old.plus(TL_LOCK_COUNT_UNIT);
     if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
       setDedicatedU16(o, lockOffset, changed);
       return;
     }
   } else if (id.EQ(TL_STAT_THIN)) {
     // lock is thin and not held by anyone
     if (Magic.attemptWord(o, lockOffset, old, old.or(tid))) {
       Magic.isync();
       return;
     }
   }
   lock(o, lockOffset);
 }
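
Two details carry the fast path here. First, id keeps only the thread-id and stat fields, so id.EQ(TL_STAT_THIN) can only hold when the stat field is THIN and the thread-id field is zero, i.e. thin and unowned, making the CAS acquisition safe. Second, the recursive branch checks that the incremented count did not wrap to zero before storing it; on overflow it falls through to lock(), which inflates. Both checks in the illustrative layout:

  public class InlineLockFastPathDemo {
    public static void main(String[] args) {
      final int ID_SHIFT = 16, STAT_SHIFT = 30;
      final long COUNT_MASK = 0xFFFFL;
      final long ID_MASK = 0x3FFFL << ID_SHIFT;
      final long STAT_MASK = 0x3L << STAT_SHIFT;
      final long STAT_THIN = 0x1L << STAT_SHIFT;

      long unowned = STAT_THIN;                          // thin, no owner, count 0
      long id = unowned & (ID_MASK | STAT_MASK);
      System.out.println(id == STAT_THIN);               // true: CAS our tid in and we own it

      long biasedMax = (7L << ID_SHIFT) | COUNT_MASK;    // biased to thread 7, count saturated
      long changed = biasedMax + 1;                      // analogue of plus(TL_LOCK_COUNT_UNIT)
      System.out.println((changed & COUNT_MASK) == 0);   // true: wrapped, so inflate instead
    }
  }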
Example #8
 /**
  * Recycles an unused heavy-weight lock. A freed lock is parked in the current thread's
  * one-slot cache when that slot is empty; otherwise it is returned to the global freelist.
  * The cache means that normally no synchronization is required to obtain or release a lock.
  *
  * @param l the unused lock
  */
 protected static void free(Lock l) {
   l.active = false;
   RVMThread me = RVMThread.getCurrentThread();
   if (me.cachedFreeLock == null) {
     if (trace) {
       VM.sysWriteln(
           "Lock.free: setting ",
           Magic.objectAsAddress(l),
           " as the cached free lock for Thread #",
           me.getThreadSlot());
     }
     me.cachedFreeLock = l;
   } else {
     if (trace) {
       VM.sysWriteln(
           "Lock.free: returning ",
           Magic.objectAsAddress(l),
           " to the global freelist for Thread #",
           me.getThreadSlot());
     }
     returnLock(l);
   }
 }
Example #9
  @NoInline
  @NoNullCheck
  @Unpreemptible
  public static void lock(Object o, Offset lockOffset) {
    if (STATS) fastLocks++;

    Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());

    for (int cnt = 0; ; cnt++) {
      Word old = Magic.getWordAtOffset(o, lockOffset);
      Word stat = old.and(TL_STAT_MASK);
      boolean tryToInflate = false;
      if (stat.EQ(TL_STAT_BIASABLE)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (ENABLE_BIASED_LOCKING) {
            // lock is unbiased, bias it in our favor and grab it
            if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
              Magic.isync();
              return;
            }
          } else {
            // lock is unbiased but biasing is NOT allowed, so turn it into
            // a thin lock
            if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId).or(TL_STAT_THIN))) {
              Magic.isync();
              return;
            }
          }
        } else if (id.EQ(threadId)) {
          // lock is biased in our favor
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            setDedicatedU16(o, lockOffset, changed);
            return;
          } else {
            tryToInflate = true;
          }
        } else {
          if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
            continue; // don't spin, since it's thin now
          }
        }
      } else if (stat.EQ(TL_STAT_THIN)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId))) {
            Magic.isync();
            return;
          }
        } else if (id.EQ(threadId)) {
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            tryToInflate = true;
          } else if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
            Magic.isync();
            return;
          }
        } else if (cnt > retryLimit) {
          tryToInflate = true;
        }
      } else {
        if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
        // lock is fat.  contend on it.
        if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
          return;
        }
      }

      if (tryToInflate) {
        if (STATS) slowLocks++;
        // the lock is not fat, is owned by someone else, or else the count wrapped.
        // attempt to inflate it (this may fail, in which case we'll just harmlessly
        // loop around) and lock it (may also fail, if we get the wrong lock).  if it
        // succeeds, we're done.
        // NB: this calls into our attemptToMarkInflated() method, which will do the
        // Right Thing if the lock is biased to someone else.
        if (inflateAndLock(o, lockOffset)) {
          return;
        }
      } else {
        RVMThread.yield();
      }
    }
  }
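
Stripped of the biasable and fat cases, lock() is a spin-then-inflate state machine: claim the word if it is unowned, bump the count if we already own it, and otherwise spin up to retryLimit before inflating. A control-flow skeleton on the illustrative layout (constants and inflateAndLock() are hypothetical stand-ins, not the RVM implementation):

  import java.util.concurrent.atomic.AtomicLong;

  final class SpinThenInflateSketch {
    static final int ID_SHIFT = 16, STAT_SHIFT = 30;
    static final long COUNT_MASK = 0xFFFFL, COUNT_UNIT = 1L;
    static final long ID_MASK = 0x3FFFL << ID_SHIFT;
    static final long STAT_THIN = 0x1L << STAT_SHIFT, STAT_FAT = 0x2L << STAT_SHIFT;
    static final int RETRY_LIMIT = 50;                  // analogue of retryLimit

    final AtomicLong word = new AtomicLong(STAT_THIN);  // starts thin and unowned

    void lock(long myId) {
      for (int cnt = 0; ; cnt++) {
        long old = word.get();
        boolean tryToInflate = false;
        long id = old & ID_MASK;
        if (id == 0) {                                          // unowned: claim it
          if (word.compareAndSet(old, old | myId)) return;
        } else if (id == myId) {                                // recursive acquire
          long changed = old + COUNT_UNIT;
          if ((changed & COUNT_MASK) == 0) tryToInflate = true; // count wrapped
          else if (word.compareAndSet(old, changed)) return;
        } else if (cnt > RETRY_LIMIT) {
          tryToInflate = true;                                  // contended for too long
        }
        if (tryToInflate) {
          if (inflateAndLock(old, myId)) return;                // may fail; just loop again
        } else {
          Thread.yield();                                       // brief backoff, then retry
        }
      }
    }

    private boolean inflateAndLock(long old, long myId) {
      // stub: the real code installs TL_STAT_FAT plus a heavy-lock index and then
      // acquires the heavy lock; a bare CAS stands in for both steps here
      return word.compareAndSet(old, STAT_FAT | myId);
    }
  }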
Example #10
  /**
   * Delivers up an unassigned heavy-weight lock. A lock is taken from the current thread's
   * one-slot cache when possible, so normally no synchronization is required to obtain one;
   * failing that, it comes from the global freelist or is freshly allocated under the
   * allocation mutex.
   *
   * <p>Collector threads cannot use heavy-weight locks.
   *
   * @return a free Lock; this method loops until it obtains one, and reports a fatal error
   *     rather than returning <code>null</code> if the lock table is exhausted
   */
  @UnpreemptibleNoWarn("The caller is prepared to lose control when it allocates a lock")
  static Lock allocate() {
    RVMThread me = RVMThread.getCurrentThread();
    if (me.cachedFreeLock != null) {
      Lock l = me.cachedFreeLock;
      me.cachedFreeLock = null;
      if (trace) {
        VM.sysWriteln(
            "Lock.allocate: returning ", Magic.objectAsAddress(l),
            ", a cached free lock from Thread #", me.getThreadSlot());
      }
      return l;
    }

    Lock l = null;
    while (l == null) {
      if (globalFreeLock != null) {
        lockAllocationMutex.lock();
        l = globalFreeLock;
        if (l != null) {
          globalFreeLock = l.nextFreeLock;
          l.nextFreeLock = null;
          l.active = true;
          globalFreeLocks--;
        }
        lockAllocationMutex.unlock();
        if (trace && l != null) {
          VM.sysWriteln(
              "Lock.allocate: returning ", Magic.objectAsAddress(l),
              " from the global freelist for Thread #", me.getThreadSlot());
        }
      } else {
        l = new Lock(); // may cause thread switch (and processor loss)
        lockAllocationMutex.lock();
        if (globalFreeLock == null) {
          // ok, it's still correct for us to be adding a new lock
          if (nextLockIndex >= MAX_LOCKS) {
            VM.sysWriteln("Too many fat locks"); // make MAX_LOCKS bigger? we can keep going??
            VM.sysFail("Exiting VM with fatal error");
          }
          l.index = nextLockIndex++;
          globalLocksAllocated++;
        } else {
          l = null; // someone added to the freelist, try again
        }
        lockAllocationMutex.unlock();
        if (l != null) {
          if (l.index >= numLocks()) {
            /* We need to grow the table */
            growLocks(l.index);
          }
          addLock(l);
          l.active = true;
          /* make sure other processors see lock initialization.
           * Note: Derek and I BELIEVE that an isync is not required in the other processor because the lock is newly allocated - Bowen */
          Magic.sync();
        }
        if (trace && l != null) {
          VM.sysWriteln(
              "Lock.allocate: returning ",
              Magic.objectAsAddress(l),
              ", a freshly allocated lock for Thread #",
              me.getThreadSlot());
        }
      }
    }
    return l;
  }
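
Together with free() in Example #8, this implements a one-slot thread-local cache in front of a mutex-protected global freelist. The re-check of globalFreeLock after new Lock() is needed because allocation may trigger a thread switch, during which other threads can stock the freelist; preferring the freelist keeps the bounded supply of lock indices from being consumed unnecessarily. A generic sketch of the caching shape, with hypothetical names and the index bookkeeping omitted:

  import java.util.ArrayDeque;

  final class LockPoolSketch {
    static final class PoolLock { }                      // stand-in for Lock

    static final ThreadLocal<PoolLock> cachedFreeLock = new ThreadLocal<>();
    static final ArrayDeque<PoolLock> globalFreeLocks = new ArrayDeque<>();

    static PoolLock allocate() {
      PoolLock l = cachedFreeLock.get();
      if (l != null) {                                   // fast path: no synchronization
        cachedFreeLock.set(null);
        return l;
      }
      synchronized (globalFreeLocks) {                   // slow path: global freelist
        l = globalFreeLocks.poll();
      }
      return (l != null) ? l : new PoolLock();           // grow the pool if the list was empty
    }

    static void free(PoolLock l) {
      if (cachedFreeLock.get() == null) {                // fast path: keep one lock cached
        cachedFreeLock.set(l);
      } else {
        synchronized (globalFreeLocks) { globalFreeLocks.add(l); }
      }
    }
  }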