Example #1
 /**
  * Releases this heavy-weight lock on the indicated object.
  *
  * <p>Must be called by the thread that owns the lock; a non-owner caller gets an
  * IllegalMonitorStateException. When the outermost recursive hold is released, the
  * next entering thread (if any) is awakened; if no thread is entering or waiting,
  * the heavy lock is deflated back to a thin lock where the object's class supports
  * a thin-lock word.
  *
  * @param o the object to be unlocked
  */
 @Unpreemptible
 public void unlockHeavy(Object o) {
   mutex.lock(); // Note: thread switching is not allowed while mutex is held.
   RVMThread me = RVMThread.getCurrentThread();
   if (ownerId != me.getLockingId()) {
     mutex.unlock(); // thread-switching benign
     raiseIllegalMonitorStateException("heavy unlocking", o);
   }
   recursionCount--;
   if (0 < recursionCount) {
     // Still held recursively by this thread; nothing further to do.
     mutex.unlock(); // thread-switching benign
     return;
   }
   if (STATS) unlockOperations++;
   ownerId = 0;
   RVMThread toAwaken = entering.dequeue();
   if (toAwaken == null && entering.isEmpty() && waiting.isEmpty()) { // heavy lock can be deflated
     // Possible project: decide on a heuristic to control when lock should be deflated
     Offset lockOffset = Magic.getObjectType(o).getThinLockOffset();
     if (!lockOffset.isMax()) { // deflate heavy lock
       deflate(o, lockOffset);
     }
   }
   mutex.unlock(); // does a Magic.sync();  (thread-switching benign)
   if (toAwaken != null) {
     // Broadcast after dropping the mutex so we never block a waiter while holding it.
     toAwaken.monitor().lockedBroadcastNoHandshake();
   }
 }
Example #2
 /**
  * Complete the task of acquiring the heavy lock, assuming that the mutex is already acquired
  * (locked).
  *
  * @param o the object whose heavy lock is to be acquired
  * @return true if the lock was acquired (either fresh or recursively); false if the lock is no
  *     longer associated with {@code o}, or if this thread had to queue behind another owner (the
  *     caller is expected to retry in that case)
  */
 @Unpreemptible
 public boolean lockHeavyLocked(Object o) {
   if (lockedObject != o) { // lock disappeared before we got here
     mutex.unlock(); // thread switching benign
     return false;
   }
   if (STATS) lockOperations++;
   RVMThread me = RVMThread.getCurrentThread();
   int threadId = me.getLockingId();
   if (ownerId == threadId) {
     // Recursive acquisition by the current owner.
     recursionCount++;
   } else if (ownerId == 0) {
     // Lock is free: claim it for this thread.
     ownerId = threadId;
     recursionCount = 1;
   } else {
     // Contended: queue behind the current owner, then sleep on our own monitor
     // until the owner's unlock dequeues us.
     entering.enqueue(me);
     mutex.unlock();
     me.monitor().lockNoHandshake();
     while (entering.isQueued(me)) {
       me.monitor().waitWithHandshake(); // this may spuriously return
     }
     me.monitor().unlock();
     return false;
   }
   mutex.unlock(); // thread-switching benign
   return true;
 }
Example #3
 /**
  * Acquires this heavy-weight lock on the indicated object.
  *
  * @param o the object to be locked
  * @return true, if the lock succeeds; false, otherwise
  */
 @Unpreemptible
 public boolean lockHeavy(Object o) {
   // First grab the mutex: a blocking acquire normally, or an optimistic
   // try when tentative microlocking is enabled.
   if (!tentativeMicrolocking) {
     mutex.lock(); // Note: thread switching is not allowed while mutex is held.
   } else if (!mutex.tryLock()) {
     return false; // microlock contended: give up rather than block
   }
   return lockHeavyLocked(o);
 }
Example #4
 /**
  * Returns an unused heavy-weight lock to the global free list.
  *
  * <p>The lock is pushed onto the head of the free list (LIFO) and the
  * free/freed counters are updated, all under {@code lockAllocationMutex}.
  *
  * @param l the lock to return to the global free list
  */
 static void returnLock(Lock l) {
   if (trace) {
     VM.sysWriteln(
         "Lock.returnLock: returning ",
         Magic.objectAsAddress(l),
         " to the global freelist for Thread #",
         RVMThread.getCurrentThreadSlot());
   }
   lockAllocationMutex.lock();
   // Push l onto the head of the global free list.
   l.nextFreeLock = globalFreeLock;
   globalFreeLock = l;
   globalFreeLocks++;
   globalLocksFreed++;
   lockAllocationMutex.unlock();
 }
Example #5
  /**
   * Grow the locks table by allocating a new spine chunk.
   *
   * @param id the lock index that must be addressable after the table has grown
   */
  @UnpreemptibleNoWarn("The caller is prepared to lose control when it allocates a lock")
  static void growLocks(int id) {
    int spineId = id >> LOG_LOCK_CHUNK_SIZE; // spine slot that must be populated for id
    if (spineId >= LOCK_SPINE_SIZE) {
      VM.sysFail("Cannot grow lock array greater than maximum possible index");
    }
    // Populate every missing chunk up to and including spineId.
    for (int i = chunksAllocated; i <= spineId; i++) {
      // Unsynchronized peek: if another thread already installed this chunk, skip it.
      if (locks[i] != null) {
        /* We were beaten to it */
        continue;
      }

      /* Allocate the chunk outside the mutex — per the annotation above, allocation
       * may cause this thread to lose control. */
      Lock[] newChunk = new Lock[LOCK_CHUNK_SIZE];

      lockAllocationMutex.lock();
      // Re-check under the mutex: a racing thread may have installed a chunk meanwhile.
      if (locks[i] == null) {
        /* We got here first */
        locks[i] = newChunk;
        chunksAllocated++;
      }
      lockAllocationMutex.unlock();
    }
  }
Example #6
  /**
   * Delivers up an unassigned heavy-weight lock. Locks are allocated from processor specific
   * regions or lists, so normally no synchronization is required to obtain a lock.
   *
   * <p>Collector threads cannot use heavy-weight locks.
   *
   * <p>Three paths, tried in order: (1) the current thread's cached free lock (no
   * synchronization); (2) the global free list, rechecked under {@code lockAllocationMutex};
   * (3) a freshly constructed Lock, assigned an index under the mutex — discarded and
   * retried if the free list was repopulated in the meantime.
   *
   * @return a free Lock; or <code>null</code>, if garbage collection is not enabled
   */
  @UnpreemptibleNoWarn("The caller is prepared to lose control when it allocates a lock")
  static Lock allocate() {
    RVMThread me = RVMThread.getCurrentThread();
    // Fast path: reuse this thread's cached free lock without any locking.
    if (me.cachedFreeLock != null) {
      Lock l = me.cachedFreeLock;
      me.cachedFreeLock = null;
      if (trace) {
        VM.sysWriteln(
            "Lock.allocate: returning ", Magic.objectAsAddress(l),
            ", a cached free lock from Thread #", me.getThreadSlot());
      }
      return l;
    }

    Lock l = null;
    // Retry until we either pop a lock off the global free list or register a fresh one.
    while (l == null) {
      if (globalFreeLock != null) { // unsynchronized peek; verified again under the mutex
        lockAllocationMutex.lock();
        l = globalFreeLock;
        if (l != null) { // still non-empty: unlink the head of the free list
          globalFreeLock = l.nextFreeLock;
          l.nextFreeLock = null;
          l.active = true;
          globalFreeLocks--;
        }
        lockAllocationMutex.unlock();
        if (trace && l != null) {
          VM.sysWriteln(
              "Lock.allocate: returning ", Magic.objectAsAddress(l),
              " from the global freelist for Thread #", me.getThreadSlot());
        }
      } else {
        l = new Lock(); // may cause thread switch (and processor loss)
        lockAllocationMutex.lock();
        if (globalFreeLock == null) {
          // ok, it's still correct for us to be adding a new lock
          if (nextLockIndex >= MAX_LOCKS) {
            VM.sysWriteln("Too many fat locks"); // make MAX_LOCKS bigger? we can keep going??
            VM.sysFail("Exiting VM with fatal error");
          }
          l.index = nextLockIndex++;
          globalLocksAllocated++;
        } else {
          l = null; // someone added to the freelist, try again
        }
        lockAllocationMutex.unlock();
        if (l != null) {
          if (l.index >= numLocks()) {
            /* We need to grow the table */
            growLocks(l.index);
          }
          addLock(l);
          l.active = true;
          /* make sure other processors see lock initialization.
           * Note: Derek and I BELIEVE that an isync is not required in the other processor because the lock is newly allocated - Bowen */
          Magic.sync();
        }
        if (trace && l != null) {
          VM.sysWriteln(
              "Lock.allocate: returning ",
              Magic.objectAsAddress(l),
              ", a freshly allocated lock for Thread #",
              me.getThreadSlot());
        }
      }
    }
    return l;
  }