Code example #1
File: ImmortalSpace.java Project: vilay/check
 /** atomically write the given value in the mark bit. */
 private static void atomicWriteMarkBit(ObjectReference object, Word value) {
   while (true) {
     Word oldValue = ObjectModel.prepareAvailableBits(object);
     Word newValue = oldValue.and(GC_MARK_BIT_MASK.not()).or(value);
     if (ObjectModel.attemptAvailableBits(object, oldValue, newValue)) break;
   }
 }
Code example #2
File: ThinLock.java Project: jaggerlink/cs356
 /** RaceDet: get recursion count, assuming lock is held by current thread */
 @Uninterruptible
 @NoNullCheck
 @Inline
 public static final int getRecursionCountLocked(Object o, Offset lockOffset) {
   if (VM.VerifyAssertions) {
     VM._assert(holdsLock(o, lockOffset, RVMThread.getCurrentThread()));
   }
   Word bits = Magic.getWordAtOffset(o, lockOffset);
   int count;
   if (bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT)) {
     // if locked, then it is locked with a fat lock
     Lock l = Lock.getLock(getLockIndex(bits));
     l.mutex.lock();
     count = l.getRecursionCount();
     l.mutex.unlock();
   } else {
     if (VM.VerifyAssertions) {
       VM._assert(
           bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE) || bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN));
     }
     count = getRecCount(bits);
   }
   if (VM.VerifyAssertions) {
     VM._assert(count > 0);
   }
   return count;
 }
Code example #3
File: ImmortalSpace.java Project: vilay/check
 /**
  * Used to mark boot image objects during a parallel scan of objects during GC. Returns true if
  * marking was done.
  */
 private static boolean testAndMark(ObjectReference object, Word value) throws InlinePragma {
   Word oldValue;
   do {
     oldValue = ObjectModel.prepareAvailableBits(object);
     Word markBit = oldValue.and(GC_MARK_BIT_MASK);
     if (markBit.EQ(value)) return false;
   } while (!ObjectModel.attemptAvailableBits(object, oldValue, oldValue.xor(GC_MARK_BIT_MASK)));
   return true;
 }
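The prepare/attempt loop above is the usual compare-and-swap retry idiom. As a rough sketch only (using java.util.concurrent.atomic.AtomicLong in place of the JikesRVM ObjectModel API, with a hypothetical one-bit mask), the same test-and-mark pattern looks like this in plain Java:

 import java.util.concurrent.atomic.AtomicLong;

 /** Illustrative sketch only: the test-and-mark retry loop expressed with AtomicLong. */
 final class MarkBitSketch {
   private static final long GC_MARK_BIT_MASK = 1L;     // hypothetical one-bit mark mask
   private final AtomicLong header = new AtomicLong();  // stands in for the available-bits word

   /** Returns true if this call flipped the mark bit, false if it already held the given value. */
   boolean testAndMark(long value) {
     long oldValue;
     do {
       oldValue = header.get();                                  // plays the role of "prepareAvailableBits"
       if ((oldValue & GC_MARK_BIT_MASK) == value) return false; // already marked
     } while (!header.compareAndSet(oldValue, oldValue ^ GC_MARK_BIT_MASK)); // "attemptAvailableBits"
     return true;
   }
 }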
Code example #4
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @Uninterruptible
 public static int getRecCount(Word lockWord) {
   if (VM.VerifyAssertions) VM._assert(getLockOwner(lockWord) != 0);
   if (lockWord.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
     return lockWord.and(TL_LOCK_COUNT_MASK).rshl(TL_LOCK_COUNT_SHIFT).toInt();
   } else {
     return lockWord.and(TL_LOCK_COUNT_MASK).rshl(TL_LOCK_COUNT_SHIFT).toInt() + 1;
   }
 }
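The lock word packs several fields, and getRecCount pulls the recursion count out with the usual mask-and-shift idiom. A minimal sketch with made-up mask and shift constants (the real TL_LOCK_COUNT_MASK/TL_LOCK_COUNT_SHIFT values depend on the object header layout):

 /** Sketch of mask-and-shift field extraction; the constants here are invented for illustration. */
 final class LockWordFieldSketch {
   static final long COUNT_MASK = 0x00FF0000L; // hypothetical 8-bit recursion-count field
   static final int COUNT_SHIFT = 16;

   static long getCount(long lockWord) {
     return (lockWord & COUNT_MASK) >>> COUNT_SHIFT; // isolate the field, then shift it down
   }

   public static void main(String[] args) {
     long word = (3L << COUNT_SHIFT) | 0x1234L; // count field holds 3, other bits hold unrelated data
     System.out.println(getCount(word));        // prints 3
   }
 }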
Code example #5
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @Uninterruptible
 public static int getLockOwner(Word lockWord) {
   if (VM.VerifyAssertions) VM._assert(!isFat(lockWord));
   if (lockWord.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
     if (lockWord.and(TL_LOCK_COUNT_MASK).isZero()) {
       return 0;
     } else {
       return lockWord.and(TL_THREAD_ID_MASK).toInt();
     }
   } else {
     return lockWord.and(TL_THREAD_ID_MASK).toInt();
   }
 }
Code example #6
File: ThinLock.java Project: jaggerlink/cs356
  @NoInline
  @Unpreemptible
  public static boolean casFromBiased(
      Object o, Offset lockOffset, Word oldLockWord, Word changed, int cnt) {
    RVMThread me = RVMThread.getCurrentThread();
    Word id = oldLockWord.and(TL_THREAD_ID_MASK);
    if (id.isZero()) {
      if (false) VM.sysWriteln("id is zero - easy case.");
      return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
    } else {
      if (false) VM.sysWriteln("id = ", id);
      int slot = id.toInt() >> TL_THREAD_ID_SHIFT;
      if (false) VM.sysWriteln("slot = ", slot);
      RVMThread owner = RVMThread.threadBySlot[slot];
      if (owner == me /* I own it, so I can unbias it trivially.  This occurs
                       when we are inflating due to, for example, wait() */
          || owner == null /* the thread that owned it is dead, so it's safe to
                         unbias. */) {
        // note that we use a CAS here, but it's only needed in the case
        // that owner==null, since in that case some other thread may also
        // be unbiasing.
        return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
      } else {
        boolean result = false;

        // NB. this may stop a thread other than the one that had the bias,
        // if that thread died and some other thread took its slot.  that's
        // why we do a CAS below.  it's only needed if some other thread
        // had seen the owner be null (which may happen if we came here after
        // a new thread took the slot while someone else came here when the
        // slot was still null).  if it was the case that everyone else had
        // seen a non-null owner, then the pair handshake would serve as
        // sufficient synchronization (the id would identify the set of threads
        // that shared that id's communicationLock).  oddly, that means that
        // this whole thing could be "simplified" to acquire the
        // communicationLock even if the owner was null.  but that would be
        // goofy.
        if (false) VM.sysWriteln("entering pair handshake");
        owner.beginPairHandshake();
        if (false) VM.sysWriteln("done with that");

        Word newLockWord = Magic.getWordAtOffset(o, lockOffset);
        result = Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
        owner.endPairHandshake();
        if (false) VM.sysWriteln("that worked.");

        return result;
      }
    }
  }
Code example #7
File: Lock.java Project: luiseduardohdbackup/mrp
 /** Sets up the data structures for holding heavy-weight locks. */
 @Interruptible
 public static void init() {
   nextLockIndex = 1;
   locks = new Lock[LOCK_SPINE_SIZE][];
   for (int i = 0; i < INITIAL_CHUNKS; i++) {
     chunksAllocated++;
     locks[i] = new Lock[LOCK_CHUNK_SIZE];
   }
   if (VM.VerifyAssertions) {
     // check that each potential lock is addressable
     if (((MAX_LOCKS - 1)
             > ThinLockConstants.TL_LOCK_ID_MASK.rshl(ThinLockConstants.TL_LOCK_ID_SHIFT).toInt())
         && ThinLockConstants.TL_LOCK_ID_MASK.NE(Word.fromIntSignExtend(-1))) {
       VM._assert(
           false,
           "Object model doesn't allow all locks to be addressable "
               + "(max locks - "
               + MAX_LOCKS
               + ", thin lock mask - "
               + ThinLockConstants.TL_LOCK_ID_MASK.toInt()
               + ", thin lock shift"
               + ThinLockConstants.TL_LOCK_ID_SHIFT
               + ")");
     }
   }
 }
Code example #8
 private void zeroBlock(Address block) {
   // FIXME: efficiency check here!
   if (VM.VERIFY_ASSERTIONS)
     VM.assertions._assert(
         block.toWord().and(Word.fromIntSignExtend(BYTES_IN_BLOCK - 1)).isZero());
   VM.memory.zero(block, Extent.fromIntZeroExtend(BYTES_IN_BLOCK));
 }
Code example #9
File: ThinLock.java Project: jaggerlink/cs356
 /**
  * Set only the dedicated locking 16-bit part of the given value. This is the only part that is
  * allowed to be written without a CAS. This takes care of the shifting and storing of the value.
  *
  * @param o The object whose header is to be changed
  * @param lockOffset The lock offset
  * @param value The value which contains the 16-bit portion to be written.
  */
 @Inline
 @Unpreemptible
 private static void setDedicatedU16(Object o, Offset lockOffset, Word value) {
   Magic.setCharAtOffset(
       o,
       lockOffset.plus(TL_DEDICATED_U16_OFFSET),
       (char) (value.toInt() >>> TL_DEDICATED_U16_SHIFT));
 }
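setDedicatedU16 narrows the lock word to the dedicated 16-bit slice before storing it as a char at its own offset. A sketch of just that shift-and-narrow step, assuming (purely for illustration) that the dedicated bits sit above a given shift in a 32-bit word:

 /** Sketch of the shift-and-narrow step; the 32-bit layout and shift value are assumptions. */
 final class DedicatedU16Sketch {
   static char dedicatedU16(int lockWord, int shift) {
     return (char) (lockWord >>> shift); // keep only the 16 bits starting at 'shift'
   }

   public static void main(String[] args) {
     int word = 0xABCD1234;
     System.out.println(Integer.toHexString(dedicatedU16(word, 16))); // prints abcd
   }
 }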
Code example #10
File: ThinLock.java Project: jaggerlink/cs356
  @Inline
  @Uninterruptible
  private static Word biasBitsToThinBits(Word bits) {
    int lockOwner = getLockOwner(bits);

    Word changed = bits.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);

    if (lockOwner != 0) {
      int recCount = getRecCount(bits);
      changed =
          changed
              .or(Word.fromIntZeroExtend(lockOwner))
              .or(Word.fromIntZeroExtend(recCount - 1).lsh(TL_LOCK_COUNT_SHIFT));
    }

    return changed;
  }
Code example #11
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @Uninterruptible
 public static boolean attemptToMarkDeflated(Object o, Offset lockOffset, Word oldLockWord) {
   // we allow concurrent modification of the lock word when it's thin or fat.
   Word changed = oldLockWord.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
   if (VM.VerifyAssertions) VM._assert(getLockOwner(changed) == 0);
   return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
 }
Code example #12
File: VM_Memory.java Project: rmcilroy/HeraJVM
 @Inline
 public static Address alignUp(Address address, int alignment) {
   return address
       .plus(alignment - 1)
       .toWord()
       .and(Word.fromIntSignExtend(~(alignment - 1)))
       .toAddress();
 }
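alignUp relies on the standard round-up trick for power-of-two alignments: add alignment-1, then clear the low bits. A self-contained sketch on plain longs (the Address/Word types are replaced by primitives purely for illustration):

 /** Sketch of the power-of-two round-up used by alignUp; assumes alignment is a power of two. */
 final class AlignSketch {
   static long alignUp(long address, int alignment) {
     return (address + alignment - 1) & ~(long) (alignment - 1); // clear the low log2(alignment) bits
   }

   public static void main(String[] args) {
     System.out.println(alignUp(4097, 4096)); // 8192
     System.out.println(alignUp(4096, 4096)); // 4096 (already aligned, unchanged)
   }
 }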
Code example #13
File: ThinLock.java Project: jaggerlink/cs356
 @Uninterruptible
 @NoNullCheck
 public static boolean holdsLock(Object o, Offset lockOffset, RVMThread thread) {
   for (int cnt = 0; ; ++cnt) {
     int tid = thread.getLockingId();
     Word bits = Magic.getWordAtOffset(o, lockOffset);
     if (bits.and(TL_STAT_MASK).EQ(TL_STAT_BIASABLE)) {
       // if locked, then it is locked with a thin lock
       return bits.and(TL_THREAD_ID_MASK).toInt() == tid && !bits.and(TL_LOCK_COUNT_MASK).isZero();
     } else if (bits.and(TL_STAT_MASK).EQ(TL_STAT_THIN)) {
       return bits.and(TL_THREAD_ID_MASK).toInt() == tid;
     } else {
       if (VM.VerifyAssertions) VM._assert(bits.and(TL_STAT_MASK).EQ(TL_STAT_FAT));
       // if locked, then it is locked with a fat lock
       Lock l = Lock.getLock(getLockIndex(bits));
       if (l != null) {
         l.mutex.lock();
         boolean result = (l.getOwnerId() == tid && l.getLockedObject() == o);
         l.mutex.unlock();
         return result;
       }
     }
     RVMThread.yield();
   }
 }
Code example #14
File: LargeObjectSpace.java Project: chanwit/vmkit
 /**
  * Prepare for a new collection increment. For the mark-sweep collector we must flip the state of
  * the mark bit between collections.
  */
 public void prepare(boolean fullHeap) {
   if (fullHeap) {
     if (VM.VERIFY_ASSERTIONS) {
       VM.assertions._assert(treadmill.fromSpaceEmpty());
     }
     markState = MARK_BIT.minus(markState);
   }
   treadmill.flip(fullHeap);
   inNurseryGC = !fullHeap;
 }
Code example #15
File: ThinLock.java Project: jaggerlink/cs356
 /**
  * Return the lock index for a given lock word. Assert valid index ranges, that the fat lock bit
  * is set, and that the lock entry exists.
  *
  * @param lockWord The lock word whose lock index is being established
  * @return the lock index corresponding to the lock word.
  */
 @Inline
 @Uninterruptible
 public static int getLockIndex(Word lockWord) {
   int index = lockWord.and(TL_LOCK_ID_MASK).rshl(TL_LOCK_ID_SHIFT).toInt();
   if (VM.VerifyAssertions) {
     if (!(index > 0 && index < Lock.numLocks())) {
       VM.sysWrite("Lock index out of range! Word: ");
       VM.sysWrite(lockWord);
       VM.sysWrite(" index: ");
       VM.sysWrite(index);
       VM.sysWrite(" locks: ");
       VM.sysWrite(Lock.numLocks());
       VM.sysWriteln();
     }
     VM._assert(index > 0 && index < Lock.numLocks()); // index is in range
     VM._assert(lockWord.and(TL_STAT_MASK).EQ(TL_STAT_FAT)); // fat lock bit is set
   }
   return index;
 }
Code example #16
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @NoNullCheck
 @Unpreemptible
 public static void inlineLockHelper(Object o, Offset lockOffset) {
   Word old = Magic.prepareWord(o, lockOffset); // FIXME: bad for PPC?
   Word id = old.and(TL_THREAD_ID_MASK.or(TL_STAT_MASK));
   Word tid = Word.fromIntSignExtend(RVMThread.getCurrentThread().getLockingId());
   if (id.EQ(tid)) {
     Word changed = old.plus(TL_LOCK_COUNT_UNIT);
     if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
       setDedicatedU16(o, lockOffset, changed);
       return;
     }
   } else if (id.EQ(TL_STAT_THIN)) {
     // lock is thin and not held by anyone
     if (Magic.attemptWord(o, lockOffset, old, old.or(tid))) {
       Magic.isync();
       return;
     }
   }
   lock(o, lockOffset);
 }
Code example #17
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @Unpreemptible
 public static boolean attemptToMarkInflated(
     Object o, Offset lockOffset, Word oldLockWord, int lockId, int cnt) {
   if (VM.VerifyAssertions) VM._assert(oldLockWord.and(TL_STAT_MASK).NE(TL_STAT_FAT));
   if (false) VM.sysWriteln("attemptToMarkInflated with oldLockWord = ", oldLockWord);
   // what this needs to do:
   // 1) if the lock is thin, it's just a CAS
   // 2) if the lock is unbiased, CAS in the inflation
   // 3) if the lock is biased in our favor, store the lock without CAS
   // 4) if the lock is biased but to someone else, enter the pair handshake
   //    to unbias it and install the inflated lock
   Word changed =
       TL_STAT_FAT
           .or(Word.fromIntZeroExtend(lockId).lsh(TL_LOCK_ID_SHIFT))
           .or(oldLockWord.and(TL_UNLOCK_MASK));
   if (false && oldLockWord.and(TL_STAT_MASK).EQ(TL_STAT_THIN))
     VM.sysWriteln(
         "obj = ",
         Magic.objectAsAddress(o),
         ", old = ",
         oldLockWord,
         ", owner = ",
         getLockOwner(oldLockWord),
         ", rec = ",
         getLockOwner(oldLockWord) == 0 ? 0 : getRecCount(oldLockWord),
         ", changed = ",
         changed,
         ", lockId = ",
         lockId);
   if (false) VM.sysWriteln("changed = ", changed);
   if (oldLockWord.and(TL_STAT_MASK).EQ(TL_STAT_THIN)) {
     if (false) VM.sysWriteln("it's thin, inflating the easy way.");
     return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
   } else {
     return casFromBiased(o, lockOffset, oldLockWord, changed, cnt);
   }
 }
Code example #18
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @NoNullCheck
 @Unpreemptible
 public static void inlineUnlockHelper(Object o, Offset lockOffset) {
   Word old = Magic.prepareWord(o, lockOffset); // FIXME: bad for PPC?
   Word id = old.and(TL_THREAD_ID_MASK.or(TL_STAT_MASK));
   Word tid = Word.fromIntSignExtend(RVMThread.getCurrentThread().getLockingId());
   if (id.EQ(tid)) {
     if (!old.and(TL_LOCK_COUNT_MASK).isZero()) {
       setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
       return;
     }
   } else if (old.xor(tid).rshl(TL_LOCK_COUNT_SHIFT).EQ(TL_STAT_THIN.rshl(TL_LOCK_COUNT_SHIFT))) {
     Magic.sync();
     if (Magic.attemptWord(o, lockOffset, old, old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN))) {
       return;
     }
   }
   unlock(o, lockOffset);
 }
Code example #19
File: LargeObjectSpace.java Project: chanwit/vmkit
 /**
  * Perform any required initialization of the GC portion of the header.
  *
  * @param object the object ref to the storage to be initialized
  * @param alloc is this initialization occurring due to (initial) allocation (true) or due to
  *     copying (false)?
  */
 @Inline
 public void initializeHeader(ObjectReference object, boolean alloc) {
   Word oldValue = VM.objectModel.readAvailableBitsWord(object);
   Word newValue = oldValue.and(LOS_BIT_MASK.not()).or(markState);
   if (alloc) newValue = newValue.or(NURSERY_BIT);
   if (Plan.NEEDS_LOG_BIT_IN_HEADER) newValue = newValue.or(Plan.UNLOGGED_BIT);
   VM.objectModel.writeAvailableBitsWord(object, newValue);
   Address cell = VM.objectModel.objectStartRef(object);
   treadmill.addToTreadmill(Treadmill.midPayloadToNode(cell), alloc);
 }
Code example #20
File: LargeObjectSpace.java Project: chanwit/vmkit
 /**
  * Atomically attempt to set the mark bit of an object. Return true if successful, false if the
  * mark bit was already set.
  *
  * @param object The object whose mark bit is to be written
  * @param value The value to which the mark bit will be set
  */
 @Inline
 private boolean testAndMark(ObjectReference object, Word value) {
   Word oldValue, markBit;
   do {
     oldValue = VM.objectModel.prepareAvailableBits(object);
     markBit = oldValue.and(inNurseryGC ? LOS_BIT_MASK : MARK_BIT);
     if (markBit.EQ(value)) return false;
   } while (!VM.objectModel.attemptAvailableBits(
       object, oldValue, oldValue.and(LOS_BIT_MASK.not()).or(value)));
   return true;
 }
Code example #21
File: LargeObjectSpace.java Project: chanwit/vmkit
 /**
  * The caller specifies the region of virtual memory to be used for this space. If this region
  * conflicts with an existing space, then the constructor will fail.
  *
  * @param name The name of this space (used when printing error messages etc)
  * @param pageBudget The number of pages this space may consume before consulting the plan
  * @param vmRequest An object describing the virtual memory requested.
  */
 public LargeObjectSpace(String name, int pageBudget, VMRequest vmRequest) {
   super(name, pageBudget, vmRequest);
   treadmill = new Treadmill(LOG_BYTES_IN_PAGE, true);
   markState = Word.zero();
 }
Code example #22
File: LargeObjectSpace.java Project: chanwit/vmkit
/** Each instance of this class corresponds to one explicitly managed large object space. */
@Uninterruptible
public final class LargeObjectSpace extends BaseLargeObjectSpace {

  /**
   * **************************************************************************
   *
   * <p>Class variables
   */
  public static final int LOCAL_GC_BITS_REQUIRED = 2;

  public static final int GLOBAL_GC_BITS_REQUIRED = 0;
  private static final Word MARK_BIT = Word.one(); // ...01
  private static final Word NURSERY_BIT = Word.fromIntZeroExtend(2); // ...10
  private static final Word LOS_BIT_MASK = Word.fromIntZeroExtend(3); // ...11

  /**
   * **************************************************************************
   *
   * <p>Instance variables
   */
  private Word markState;

  private boolean inNurseryGC;
  private final Treadmill treadmill;

  /**
   * **************************************************************************
   *
   * <p>Initialization
   */

  /**
   * The caller specifies the region of virtual memory to be used for this space. If this region
   * conflicts with an existing space, then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param pageBudget The number of pages this space may consume before consulting the plan
   * @param vmRequest An object describing the virtual memory requested.
   */
  public LargeObjectSpace(String name, int pageBudget, VMRequest vmRequest) {
    super(name, pageBudget, vmRequest);
    treadmill = new Treadmill(LOG_BYTES_IN_PAGE, true);
    markState = Word.zero();
  }

  /**
   * **************************************************************************
   *
   * <p>Collection
   */

  /**
   * Prepare for a new collection increment. For the mark-sweep collector we must flip the state of
   * the mark bit between collections.
   */
  public void prepare(boolean fullHeap) {
    if (fullHeap) {
      if (VM.VERIFY_ASSERTIONS) {
        VM.assertions._assert(treadmill.fromSpaceEmpty());
      }
      markState = MARK_BIT.minus(markState);
    }
    treadmill.flip(fullHeap);
    inNurseryGC = !fullHeap;
  }

  /**
   * A new collection increment has completed. For the mark-sweep collector this means we can
   * perform the sweep phase.
   */
  public void release(boolean fullHeap) {
    // sweep the large objects
    sweepLargePages(true); // sweep the nursery
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(treadmill.nurseryEmpty());
    if (fullHeap) sweepLargePages(false); // sweep the mature space
  }

  /** Sweep through the large pages, releasing all superpages on the "from space" treadmill. */
  private void sweepLargePages(boolean sweepNursery) {
    while (true) {
      Address cell = sweepNursery ? treadmill.popNursery() : treadmill.pop();
      if (cell.isZero()) break;
      release(getSuperPage(cell));
    }
    if (VM.VERIFY_ASSERTIONS)
      VM.assertions._assert(sweepNursery ? treadmill.nurseryEmpty() : treadmill.fromSpaceEmpty());
  }

  /**
   * Release a group of pages that were allocated together.
   *
   * @param first The first page in the group of pages that were allocated together.
   */
  @Inline
  public void release(Address first) {
    ((FreeListPageResource) pr).releasePages(first);
  }

  /**
   * **************************************************************************
   *
   * <p>Object processing and tracing
   */

  /**
   * Trace a reference to an object under a mark sweep collection policy. If the object header is
   * not already marked, mark the object either in the bitmap or by moving it off the treadmill, and
   * enqueue the object for subsequent processing. The object is marked as (an atomic) side-effect
   * of checking whether already marked.
   *
   * @param trace The trace being conducted.
   * @param object The object to be traced.
   * @return The object (there is no object forwarding in this collector, so we always return the
   *     same object: this could be a void method but for compliance to a more general interface).
   */
  @Inline
  public ObjectReference traceObject(TransitiveClosure trace, ObjectReference object) {
    boolean nurseryObject = isInNursery(object);
    if (!inNurseryGC || nurseryObject) {
      if (testAndMark(object, markState)) {
        internalMarkObject(object, nurseryObject);
        trace.processNode(object);
      }
    }
    return object;
  }

  /**
   * @param object The object in question
   * @return True if this object is known to be live (i.e. it is marked)
   */
  @Inline
  public boolean isLive(ObjectReference object) {
    return testMarkBit(object, markState);
  }

  /**
   * An object has been marked (identified as live). Large objects are added to the to-space
   * treadmill, while all other objects will have a mark bit set in the superpage header.
   *
   * @param object The object which has been marked.
   */
  @Inline
  private void internalMarkObject(ObjectReference object, boolean nurseryObject) {

    Address cell = VM.objectModel.objectStartRef(object);
    Address node = Treadmill.midPayloadToNode(cell);
    treadmill.copy(node, nurseryObject);
  }

  /**
   * **************************************************************************
   *
   * <p>Header manipulation
   */

  /**
   * Perform any required initialization of the GC portion of the header.
   *
   * @param object the object ref to the storage to be initialized
   * @param alloc is this initialization occurring due to (initial) allocation (true) or due to
   *     copying (false)?
   */
  @Inline
  public void initializeHeader(ObjectReference object, boolean alloc) {
    Word oldValue = VM.objectModel.readAvailableBitsWord(object);
    Word newValue = oldValue.and(LOS_BIT_MASK.not()).or(markState);
    if (alloc) newValue = newValue.or(NURSERY_BIT);
    if (Plan.NEEDS_LOG_BIT_IN_HEADER) newValue = newValue.or(Plan.UNLOGGED_BIT);
    VM.objectModel.writeAvailableBitsWord(object, newValue);
    Address cell = VM.objectModel.objectStartRef(object);
    treadmill.addToTreadmill(Treadmill.midPayloadToNode(cell), alloc);
  }

  /**
   * Atomically attempt to set the mark bit of an object. Return true if successful, false if the
   * mark bit was already set.
   *
   * @param object The object whose mark bit is to be written
   * @param value The value to which the mark bit will be set
   */
  @Inline
  private boolean testAndMark(ObjectReference object, Word value) {
    Word oldValue, markBit;
    do {
      oldValue = VM.objectModel.prepareAvailableBits(object);
      markBit = oldValue.and(inNurseryGC ? LOS_BIT_MASK : MARK_BIT);
      if (markBit.EQ(value)) return false;
    } while (!VM.objectModel.attemptAvailableBits(
        object, oldValue, oldValue.and(LOS_BIT_MASK.not()).or(value)));
    return true;
  }

  /**
   * Return true if the mark bit for an object has the given value.
   *
   * @param object The object whose mark bit is to be tested
   * @param value The value against which the mark bit will be tested
   * @return True if the mark bit for the object has the given value.
   */
  @Inline
  private boolean testMarkBit(ObjectReference object, Word value) {
    return VM.objectModel.readAvailableBitsWord(object).and(MARK_BIT).EQ(value);
  }

  /**
   * Return true if the object is in the logical nursery
   *
   * @param object The object whose status is to be tested
   * @return True if the object is in the logical nursery
   */
  @Inline
  private boolean isInNursery(ObjectReference object) {
    return VM.objectModel.readAvailableBitsWord(object).and(NURSERY_BIT).EQ(NURSERY_BIT);
  }

  /**
   * Return the size of the per-superpage header required by this system. In this case it is just
   * the underlying superpage header size.
   *
   * @return The size of the per-superpage header required by this system.
   */
  @Inline
  protected int superPageHeaderSize() {
    return Treadmill.headerSize();
  }

  /**
   * Return the size of the per-cell header for cells of a given class size.
   *
   * @return The size of the per-cell header for cells of a given class size.
   */
  @Inline
  protected int cellHeaderSize() {
    return 0;
  }

  /**
   * This is the treadmill used by the large object space.
   *
   * <p>Note that it depends on the specific local in use whether this is being used.
   *
   * @return The treadmill associated with this large object space.
   */
  public Treadmill getTreadmill() {
    return this.treadmill;
  }
}
Code example #23
File: Magic.java Project: alanweide/coff
 /** Get contents of (object + offset) and begin conditional critical section. */
 public static Word prepareWord(Object object, Offset offset) {
   if (VM.VerifyAssertions)
     VM._assert(VM.NOT_REACHED); // call site should have been hijacked by magic in compiler
   return Word.max();
 }
Code example #24
File: ImmortalSpace.java Project: vilay/check
 /** write the given value in the mark bit. */
 private static void writeMarkBit(ObjectReference object, Word value) {
   Word oldValue = ObjectModel.readAvailableBitsWord(object);
   Word newValue = oldValue.and(GC_MARK_BIT_MASK.not()).or(value);
   ObjectModel.writeAvailableBitsWord(object, newValue);
 }
Code example #25
File: ImmortalSpace.java Project: vilay/check
 /**
  * Prepare for a new collection increment. For the immortal collector we must flip the state of
  * the mark bit between collections.
  */
 public void prepare() {
   immortalMarkState = GC_MARK_BIT_MASK.sub(immortalMarkState);
 }
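With GC_MARK_BIT_MASK equal to one, subtracting the current state from the mask toggles the mark state between 0 and 1 on every collection, so the sense of "marked" alternates between GC cycles. A tiny arithmetic sketch (plain longs, purely illustrative):

 /** Sketch: with a one-bit mask, mask - state flips the mark state each collection. */
 final class MarkStateFlipSketch {
   public static void main(String[] args) {
     long mask = 1L;   // plays the role of GC_MARK_BIT_MASK
     long state = 0L;  // plays the role of immortalMarkState
     for (int gc = 1; gc <= 4; gc++) {
       state = mask - state; // mirrors GC_MARK_BIT_MASK.sub(immortalMarkState)
       System.out.println("after collection " + gc + ": markState = " + state); // 1, 0, 1, 0
     }
   }
 }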
Code example #26
File: ImmortalSpace.java Project: vilay/check
/**
 * This class implements tracing for a simple immortal collection policy. Under this policy all that
 * is required is for the "collector" to propagate marks in a liveness trace. It does not actually
 * collect. This class does not hold any state; all methods are static.
 *
 * <p>$Id: ImmortalSpace.java,v 1.18 2005/01/19 02:49:02 steveb-oss Exp $
 *
 * @author Perry Cheng
 * @author <a href="http://cs.anu.edu.au/~Steve.Blackburn">Steve Blackburn</a>
 * @version $Revision: 1.18 $
 * @date $Date: 2005/01/19 02:49:02 $
 */
public final class ImmortalSpace extends Space implements Constants, Uninterruptible {

  /**
   * **************************************************************************
   *
   * <p>Class variables
   */
  static final Word GC_MARK_BIT_MASK = Word.one();

  public static Word immortalMarkState = Word.zero(); // when GC off, the initialization value

  /**
   * **************************************************************************
   *
   * <p>Initialization
   */

  /**
   * The caller specifies the region of virtual memory to be used for this space. If this region
   * conflicts with an existing space, then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param pageBudget The number of pages this space may consume before consulting the plan
   * @param start The start address of the space in virtual memory
   * @param bytes The size of the space in virtual memory, in bytes
   */
  public ImmortalSpace(String name, int pageBudget, Address start, Extent bytes) {
    super(name, false, true, start, bytes);
    pr = new MonotonePageResource(pageBudget, this, start, extent);
  }

  /**
   * Construct a space of a given number of megabytes in size.
   *
   * <p>The caller specifies the amount virtual memory to be used for this space <i>in
   * megabytes</i>. If there is insufficient address space, then the constructor will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param pageBudget The number of pages this space may consume before consulting the plan
   * @param mb The size of the space in virtual memory, in megabytes (MB)
   */
  public ImmortalSpace(String name, int pageBudget, int mb) {
    super(name, false, true, mb);
    pr = new MonotonePageResource(pageBudget, this, start, extent);
  }

  /**
   * Construct a space that consumes a given fraction of the available virtual memory.
   *
   * <p>The caller specifies the amount virtual memory to be used for this space <i>as a fraction of
   * the total available</i>. If there is insufficient address space, then the constructor will
   * fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param pageBudget The number of pages this space may consume before consulting the plan
   * @param frac The size of the space in virtual memory, as a fraction of all available virtual
   *     memory
   */
  public ImmortalSpace(String name, int pageBudget, float frac) {
    super(name, false, true, frac);
    pr = new MonotonePageResource(pageBudget, this, start, extent);
  }

  /**
   * Construct a space that consumes a given number of megabytes of virtual memory, at either the
   * top or bottom of the available virtual memory.
   *
   * <p>The caller specifies the amount virtual memory to be used for this space <i>in
   * megabytes</i>, and whether it should be at the top or bottom of the available virtual memory.
   * If the request clashes with existing virtual memory allocations, then the constructor will
   * fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param pageBudget The number of pages this space may consume before consulting the plan
   * @param mb The size of the space in virtual memory, in megabytes (MB)
   * @param top Should this space be at the top (or bottom) of the available virtual memory.
   */
  public ImmortalSpace(String name, int pageBudget, int mb, boolean top) {
    super(name, false, true, mb, top);
    pr = new MonotonePageResource(pageBudget, this, start, extent);
  }

  /**
   * Construct a space that consumes a given fraction of the available virtual memory, at either the
   * top or bottom of the available virtual memory.
   *
   * <p>The caller specifies the amount virtual memory to be used for this space <i>as a fraction of
   * the total available</i>, and whether it should be at the top or bottom of the available virtual
   * memory. If the request clashes with existing virtual memory allocations, then the constructor
   * will fail.
   *
   * @param name The name of this space (used when printing error messages etc)
   * @param pageBudget The number of pages this space may consume before consulting the plan
   * @param frac The size of the space in virtual memory, as a fraction of all available virtual
   *     memory
   * @param top Should this space be at the top (or bottom) of the available virtual memory.
   */
  public ImmortalSpace(String name, int pageBudget, float frac, boolean top) {
    super(name, false, true, frac, top);
    pr = new MonotonePageResource(pageBudget, this, start, extent);
  }

  /**
   * **************************************************************************
   *
   * <p>Object header manipulations
   */

  /** test to see if the mark bit has the given value */
  private static boolean testMarkBit(ObjectReference object, Word value) {
    return !(ObjectModel.readAvailableBitsWord(object).and(value).isZero());
  }

  /** write the given value in the mark bit. */
  private static void writeMarkBit(ObjectReference object, Word value) {
    Word oldValue = ObjectModel.readAvailableBitsWord(object);
    Word newValue = oldValue.and(GC_MARK_BIT_MASK.not()).or(value);
    ObjectModel.writeAvailableBitsWord(object, newValue);
  }

  /** atomically write the given value in the mark bit. */
  private static void atomicWriteMarkBit(ObjectReference object, Word value) {
    while (true) {
      Word oldValue = ObjectModel.prepareAvailableBits(object);
      Word newValue = oldValue.and(GC_MARK_BIT_MASK.not()).or(value);
      if (ObjectModel.attemptAvailableBits(object, oldValue, newValue)) break;
    }
  }

  /**
   * Used to mark boot image objects during a parallel scan of objects during GC. Returns true if
   * marking was done.
   */
  private static boolean testAndMark(ObjectReference object, Word value) throws InlinePragma {
    Word oldValue;
    do {
      oldValue = ObjectModel.prepareAvailableBits(object);
      Word markBit = oldValue.and(GC_MARK_BIT_MASK);
      if (markBit.EQ(value)) return false;
    } while (!ObjectModel.attemptAvailableBits(object, oldValue, oldValue.xor(GC_MARK_BIT_MASK)));
    return true;
  }

  /**
   * Trace a reference to an object under an immortal collection policy. If the object is not
   * already marked, enqueue the object for subsequent processing. The object is marked as (an
   * atomic) side-effect of checking whether already marked.
   *
   * @param object The object to be traced.
   */
  public final ObjectReference traceObject(ObjectReference object) throws InlinePragma {
    if (testAndMark(object, immortalMarkState)) Plan.enqueue(object);
    return object;
  }

  public static void postAlloc(ObjectReference object) throws InlinePragma {
    writeMarkBit(object, immortalMarkState);
  }

  /**
   * Prepare for a new collection increment. For the immortal collector we must flip the state of
   * the mark bit between collections.
   */
  public void prepare() {
    immortalMarkState = GC_MARK_BIT_MASK.sub(immortalMarkState);
  }

  public void release() {}

  /**
   * Release an allocated page or pages. In this case we do nothing because we only release pages
   * en masse.
   *
   * @param start The address of the start of the page or pages
   */
  public final void release(Address start) throws InlinePragma {
    Assert._assert(false); // this policy only releases pages en masse
  }

  public final boolean isLive(ObjectReference object) throws InlinePragma {
    return true;
  }

  /**
   * Returns whether the object in question is currently thought to be reachable. This is done by
   * comparing the mark bit to the current mark state. For the immortal collector reachable and live
   * are different, making this method necessary.
   *
   * @param object The address of an object in immortal space to test
   * @return True if <code>ref</code> may be a reachable object (e.g., having the current mark
   *     state). While all immortal objects are live, some may be unreachable.
   */
  public static boolean isReachable(ObjectReference object) {
    return (ObjectModel.readAvailableBitsWord(object).and(GC_MARK_BIT_MASK).EQ(immortalMarkState));
  }
}
Code example #27
File: ThinLock.java Project: jaggerlink/cs356
  @NoInline
  @NoNullCheck
  @Unpreemptible
  public static void lock(Object o, Offset lockOffset) {
    if (STATS) fastLocks++;

    Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());

    for (int cnt = 0; ; cnt++) {
      Word old = Magic.getWordAtOffset(o, lockOffset);
      Word stat = old.and(TL_STAT_MASK);
      boolean tryToInflate = false;
      if (stat.EQ(TL_STAT_BIASABLE)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (ENABLE_BIASED_LOCKING) {
            // lock is unbiased, bias it in our favor and grab it
            if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
              Magic.isync();
              return;
            }
          } else {
            // lock is unbiased but biasing is NOT allowed, so turn it into
            // a thin lock
            if (Synchronization.tryCompareAndSwap(
                o, lockOffset, old, old.or(threadId).or(TL_STAT_THIN))) {
              Magic.isync();
              return;
            }
          }
        } else if (id.EQ(threadId)) {
          // lock is biased in our favor
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            setDedicatedU16(o, lockOffset, changed);
            return;
          } else {
            tryToInflate = true;
          }
        } else {
          if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
            continue; // don't spin, since it's thin now
          }
        }
      } else if (stat.EQ(TL_STAT_THIN)) {
        Word id = old.and(TL_THREAD_ID_MASK);
        if (id.isZero()) {
          if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId))) {
            Magic.isync();
            return;
          }
        } else if (id.EQ(threadId)) {
          Word changed = old.plus(TL_LOCK_COUNT_UNIT);
          if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
            tryToInflate = true;
          } else if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
            Magic.isync();
            return;
          }
        } else if (cnt > retryLimit) {
          tryToInflate = true;
        }
      } else {
        if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
        // lock is fat.  contend on it.
        if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
          return;
        }
      }

      if (tryToInflate) {
        if (STATS) slowLocks++;
        // the lock is not fat, is owned by someone else, or else the count wrapped.
        // attempt to inflate it (this may fail, in which case we'll just harmlessly
        // loop around) and lock it (may also fail, if we get the wrong lock).  if it
        // succeeds, we're done.
        // NB: this calls into our attemptToMarkInflated() method, which will do the
        // Right Thing if the lock is biased to someone else.
        if (inflateAndLock(o, lockOffset)) {
          return;
        }
      } else {
        RVMThread.yield();
      }
    }
  }
Code example #28
File: ThinLock.java Project: jaggerlink/cs356
 @NoInline
 @NoNullCheck
 @Unpreemptible
 public static void unlock(Object o, Offset lockOffset) {
   Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
   for (int cnt = 0; ; cnt++) {
     Word old = Magic.getWordAtOffset(o, lockOffset);
     Word stat = old.and(TL_STAT_MASK);
     if (stat.EQ(TL_STAT_BIASABLE)) {
       Word id = old.and(TL_THREAD_ID_MASK);
       if (id.EQ(threadId)) {
         if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
           RVMThread.raiseIllegalMonitorStateException(
               "biased unlocking: we own this object but the count is already zero", o);
         }
         setDedicatedU16(o, lockOffset, old.minus(TL_LOCK_COUNT_UNIT));
         return;
       } else {
         RVMThread.raiseIllegalMonitorStateException(
             "biased unlocking: we don't own this object", o);
       }
     } else if (stat.EQ(TL_STAT_THIN)) {
       Magic.sync();
       Word id = old.and(TL_THREAD_ID_MASK);
       if (id.EQ(threadId)) {
         Word changed;
         if (old.and(TL_LOCK_COUNT_MASK).isZero()) {
           changed = old.and(TL_UNLOCK_MASK).or(TL_STAT_THIN);
         } else {
           changed = old.minus(TL_LOCK_COUNT_UNIT);
         }
         if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
           return;
         }
       } else {
         if (false) {
           VM.sysWriteln("threadId = ", threadId);
           VM.sysWriteln("id = ", id);
         }
         RVMThread.raiseIllegalMonitorStateException(
             "thin unlocking: we don't own this object", o);
       }
     } else {
       if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
       // fat unlock
       Lock.getLock(getLockIndex(old)).unlockHeavy(o);
       return;
     }
   }
 }
Code example #29
File: VM_Memory.java Project: rmcilroy/HeraJVM
 public static boolean isPageAligned(Address addr) {
   Word pagesizeMask = Word.fromIntZeroExtend(getPagesize() - 1);
   return addr.toWord().and(pagesizeMask).isZero();
 }
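isPageAligned tests whether all page-offset bits are zero by masking with pagesize-1, which works because the page size is a power of two. A sketch with an assumed 4 KiB page size, since the real code queries the page size at runtime:

 /** Sketch of the low-bits alignment test; the 4 KiB page size is an assumption for illustration. */
 final class PageAlignSketch {
   static final long PAGE_SIZE = 4096;

   static boolean isPageAligned(long addr) {
     return (addr & (PAGE_SIZE - 1)) == 0; // aligned iff every page-offset bit is zero
   }

   public static void main(String[] args) {
     System.out.println(isPageAligned(8192)); // true
     System.out.println(isPageAligned(8195)); // false
   }
 }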
Code example #30
File: ThinLock.java Project: jaggerlink/cs356
 @Inline
 @Uninterruptible
 public static boolean isFat(Word lockWord) {
   return lockWord.and(TL_STAT_MASK).EQ(TL_STAT_FAT);
 }