/**
 * Copy numBytes from src to dst. Assumption: either the ranges are
 * non-overlapping, or {@code src >= dst + 4}. Also, src and dst are 4 byte
 * aligned and numBytes is a multiple of 4.
 *
 * @param dst the destination addr
 * @param src the source addr
 * @param numBytes the number of bytes to copy
 */
@Inline
public static void aligned32Copy(Address dst, Address src, Offset numBytes) {
  if (USE_NATIVE && numBytes.sGT(Offset.fromIntSignExtend(NATIVE_THRESHOLD))) {
    memcopy(dst, src, numBytes.toWord().toExtent());
  } else {
    if (VM.BuildFor64Addr) {
      Word wordMask = Word.one().lsh(LOG_BYTES_IN_ADDRESS).minus(Word.one());
      Word srcAlignment = src.toWord().and(wordMask);
      if (srcAlignment.EQ(dst.toWord().and(wordMask))) {
        Offset i = Offset.zero();
        if (srcAlignment.EQ(Word.fromIntZeroExtend(BYTES_IN_INT))) {
          dst.store(src.loadInt(i), i);
          i = i.plus(BYTES_IN_INT);
        }
        Word endAlignment = srcAlignment.plus(numBytes).and(Word.fromIntSignExtend(BYTES_IN_ADDRESS - 1));
        numBytes = numBytes.minus(endAlignment.toOffset());
        for (; i.sLT(numBytes); i = i.plus(BYTES_IN_ADDRESS)) {
          dst.store(src.loadWord(i), i);
        }
        if (!endAlignment.isZero()) {
          dst.store(src.loadInt(i), i);
        }
        return;
      }
    }
    // normal case: 32 bit or (64 bit not aligned)
    for (Offset i = Offset.zero(); i.sLT(numBytes); i = i.plus(BYTES_IN_INT)) {
      dst.store(src.loadInt(i), i);
    }
  }
}
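// Illustration only: a minimal, plain-Java sketch of the copy strategy used by the
// 64-bit fast path above (copy a leading 4-byte chunk until src and dst reach 8-byte
// alignment, then 8-byte words, then a trailing 4-byte chunk), expressed over a
// ByteBuffer instead of raw Addresses. The buffer and offsets are hypothetical
// stand-ins for the Address arithmetic in the real code; this is not the RVM routine.
import java.nio.ByteBuffer;

public final class Aligned32CopySketch {
  /** Copy numBytes (a multiple of 4) from srcOff to dstOff within buf; offsets are 4-byte aligned. */
  static void aligned32Copy(ByteBuffer buf, int dstOff, int srcOff, int numBytes) {
    int i = 0;
    if (numBytes != 0 && (srcOff & 7) == (dstOff & 7)) {   // same 8-byte alignment: fast path
      if ((srcOff & 7) == 4) {                             // leading 4-byte chunk to reach 8-byte alignment
        buf.putInt(dstOff + i, buf.getInt(srcOff + i));
        i += 4;
      }
      int end = numBytes - (((srcOff & 7) + numBytes) & 7);  // drop the 4-byte tail from the word loop
      for (; i < end; i += 8) {                            // bulk of the copy in 8-byte words
        buf.putLong(dstOff + i, buf.getLong(srcOff + i));
      }
      if (i < numBytes) {                                  // trailing 4-byte chunk, if any
        buf.putInt(dstOff + i, buf.getInt(srcOff + i));
      }
    } else {
      for (; i < numBytes; i += 4) {                       // slow path: 4 bytes at a time
        buf.putInt(dstOff + i, buf.getInt(srcOff + i));
      }
    }
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(64);
    for (int j = 0; j < 32; j++) buf.put(j, (byte) j);
    aligned32Copy(buf, 32, 0, 32);                         // copy the first 32 bytes to offset 32
    for (int j = 0; j < 32; j++) {
      if (buf.get(32 + j) != (byte) j) throw new AssertionError("copy mismatch at " + j);
    }
    System.out.println("copy ok");
  }
}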
@NoInline
@Unpreemptible
public static boolean casFromBiased(Object o, Offset lockOffset, Word oldLockWord,
                                    Word changed, int cnt) {
  RVMThread me = RVMThread.getCurrentThread();
  Word id = oldLockWord.and(TL_THREAD_ID_MASK);
  if (id.isZero()) {
    if (false) VM.sysWriteln("id is zero - easy case.");
    return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
  } else {
    if (false) VM.sysWriteln("id = ", id);
    int slot = id.toInt() >> TL_THREAD_ID_SHIFT;
    if (false) VM.sysWriteln("slot = ", slot);
    RVMThread owner = RVMThread.threadBySlot[slot];
    if (owner == me /* I own it, so I can unbias it trivially. This occurs
                       when we are inflating due to, for example, wait() */ ||
        owner == null /* the thread that owned it is dead, so it's safe to
                         unbias. */) {
      // note that we use a CAS here, but it's only needed in the case
      // that owner==null, since in that case some other thread may also
      // be unbiasing.
      return Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
    } else {
      boolean result = false;

      // NB. this may stop a thread other than the one that had the bias,
      // if that thread died and some other thread took its slot. that's
      // why we do a CAS below. it's only needed if some other thread
      // had seen the owner be null (which may happen if we came here after
      // a new thread took the slot while someone else came here when the
      // slot was still null). if it was the case that everyone else had
      // seen a non-null owner, then the pair handshake would serve as
      // sufficient synchronization (the id would identify the set of threads
      // that shared that id's communicationLock). oddly, that means that
      // this whole thing could be "simplified" to acquire the
      // communicationLock even if the owner was null. but that would be
      // goofy.
      if (false) VM.sysWriteln("entering pair handshake");
      owner.beginPairHandshake();
      if (false) VM.sysWriteln("done with that");

      Word newLockWord = Magic.getWordAtOffset(o, lockOffset);
      result = Synchronization.tryCompareAndSwap(o, lockOffset, oldLockWord, changed);
      owner.endPairHandshake();
      if (false) VM.sysWriteln("that worked.");

      return result;
    }
  }
}
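// Illustration only: how casFromBiased recovers the owning thread's slot from the
// lock word, via a mask and shift. The bit layout below (id in bits 10..18) is a
// hypothetical stand-in for TL_THREAD_ID_MASK / TL_THREAD_ID_SHIFT; the real
// constants are defined by the thin-lock constants, not here.
public final class ThreadIdDecodeSketch {
  static final int TL_THREAD_ID_SHIFT = 10;                        // hypothetical layout
  static final long TL_THREAD_ID_MASK = 0x1FFL << TL_THREAD_ID_SHIFT;

  /** Extract the thread slot encoded in a lock word. */
  static int slotOf(long lockWord) {
    long id = lockWord & TL_THREAD_ID_MASK;        // id field, still shifted into place
    return (int) (id >> TL_THREAD_ID_SHIFT);       // slot, as used to index threadBySlot[]
  }

  public static void main(String[] args) {
    long word = (42L << TL_THREAD_ID_SHIFT) | 0x3; // word biased to slot 42, low bits in use
    System.out.println(slotOf(word));              // prints 42
  }
}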
/**
 * Copy copyBytes from src to dst. Assumption: either the ranges are
 * non-overlapping, or {@code src >= dst + 4}. Also, src and dst are 4 byte
 * aligned and copyBytes is a multiple of 4.
 *
 * @param dst the destination addr
 * @param src the source addr
 * @param copyBytes the number of bytes to copy
 */
public static void aligned32Copy(Address dst, Address src, int copyBytes) {
  if (VM.VerifyAssertions) {
    VM._assert(copyBytes >= 0);
    VM._assert((copyBytes & (BYTES_IN_INT - 1)) == 0);
    VM._assert(src.toWord().and(Word.fromIntZeroExtend(BYTES_IN_INT - 1)).isZero());
    VM._assert(dst.toWord().and(Word.fromIntZeroExtend(BYTES_IN_INT - 1)).isZero());
    VM._assert(src.plus(copyBytes).LE(dst) || src.GE(dst.plus(BYTES_IN_INT)));
  }
  if (USE_NATIVE && copyBytes > NATIVE_THRESHOLD) {
    memcopy(dst, src, copyBytes);
  } else {
    Offset numBytes = Offset.fromIntSignExtend(copyBytes);
    if (BYTES_IN_COPY == 8 && copyBytes != 0) {
      Word wordMask = Word.fromIntZeroExtend(BYTES_IN_COPY - 1);
      Word srcAlignment = src.toWord().and(wordMask);
      if (srcAlignment.EQ(dst.toWord().and(wordMask))) {
        Offset i = Offset.zero();
        if (srcAlignment.EQ(Word.fromIntZeroExtend(BYTES_IN_INT))) {
          copy4Bytes(dst.plus(i), src.plus(i));
          i = i.plus(BYTES_IN_INT);
        }
        Word endAlignment = srcAlignment.plus(numBytes).and(wordMask);
        numBytes = numBytes.minus(endAlignment.toOffset());
        for (; i.sLT(numBytes); i = i.plus(BYTES_IN_COPY)) {
          copy8Bytes(dst.plus(i), src.plus(i));
        }
        if (!endAlignment.isZero()) {
          copy4Bytes(dst.plus(i), src.plus(i));
        }
        return;
      }
    }
    // normal case: 32 bit or (64 bit not aligned)
    for (Offset i = Offset.zero(); i.sLT(numBytes); i = i.plus(BYTES_IN_INT)) {
      copy4Bytes(dst.plus(i), src.plus(i));
    }
  }
}
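// Illustration only: the VerifyAssertions preconditions above, rewritten over plain
// long addresses so the bit tests are easy to read. BYTES_IN_INT is 4; the addresses
// passed in main() are hypothetical.
public final class Aligned32CopyPreconditions {
  static boolean preconditionsHold(long dst, long src, int copyBytes) {
    return copyBytes >= 0
        && (copyBytes & 3) == 0          // length is a multiple of 4
        && (src & 3) == 0                // source is 4-byte aligned
        && (dst & 3) == 0                // destination is 4-byte aligned
        && (src + copyBytes <= dst       // ranges do not overlap, or ...
            || src >= dst + 4);          // ... src is at least 4 bytes ahead of dst
  }

  public static void main(String[] args) {
    System.out.println(preconditionsHold(0x1000, 0x2000, 64));  // true: disjoint ranges
    System.out.println(preconditionsHold(0x1000, 0x1004, 64));  // true: src >= dst + 4
    System.out.println(preconditionsHold(0x1000, 0x0FFC, 64));  // false: src trails dst
  }
}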
@NoInline
@NoNullCheck
@Unpreemptible
public static void lock(Object o, Offset lockOffset) {
  if (STATS) fastLocks++;
  Word threadId = Word.fromIntZeroExtend(RVMThread.getCurrentThread().getLockingId());
  for (int cnt = 0; ; cnt++) {
    Word old = Magic.getWordAtOffset(o, lockOffset);
    Word stat = old.and(TL_STAT_MASK);
    boolean tryToInflate = false;
    if (stat.EQ(TL_STAT_BIASABLE)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.isZero()) {
        if (ENABLE_BIASED_LOCKING) {
          // lock is unbiased, bias it in our favor and grab it
          if (Synchronization.tryCompareAndSwap(
              o, lockOffset, old, old.or(threadId).plus(TL_LOCK_COUNT_UNIT))) {
            Magic.isync();
            return;
          }
        } else {
          // lock is unbiased but biasing is NOT allowed, so turn it into
          // a thin lock
          if (Synchronization.tryCompareAndSwap(
              o, lockOffset, old, old.or(threadId).or(TL_STAT_THIN))) {
            Magic.isync();
            return;
          }
        }
      } else if (id.EQ(threadId)) {
        // lock is biased in our favor
        Word changed = old.plus(TL_LOCK_COUNT_UNIT);
        if (!changed.and(TL_LOCK_COUNT_MASK).isZero()) {
          setDedicatedU16(o, lockOffset, changed);
          return;
        } else {
          tryToInflate = true;
        }
      } else {
        if (casFromBiased(o, lockOffset, old, biasBitsToThinBits(old), cnt)) {
          continue; // don't spin, since it's thin now
        }
      }
    } else if (stat.EQ(TL_STAT_THIN)) {
      Word id = old.and(TL_THREAD_ID_MASK);
      if (id.isZero()) {
        if (Synchronization.tryCompareAndSwap(o, lockOffset, old, old.or(threadId))) {
          Magic.isync();
          return;
        }
      } else if (id.EQ(threadId)) {
        Word changed = old.plus(TL_LOCK_COUNT_UNIT);
        if (changed.and(TL_LOCK_COUNT_MASK).isZero()) {
          tryToInflate = true;
        } else if (Synchronization.tryCompareAndSwap(o, lockOffset, old, changed)) {
          Magic.isync();
          return;
        }
      } else if (cnt > retryLimit) {
        tryToInflate = true;
      }
    } else {
      if (VM.VerifyAssertions) VM._assert(stat.EQ(TL_STAT_FAT));
      // lock is fat. contend on it.
      if (Lock.getLock(getLockIndex(old)).lockHeavy(o)) {
        return;
      }
    }
    if (tryToInflate) {
      if (STATS) slowLocks++;
      // the lock is not fat, is owned by someone else, or else the count wrapped.
      // attempt to inflate it (this may fail, in which case we'll just harmlessly
      // loop around) and lock it (may also fail, if we get the wrong lock). if it
      // succeeds, we're done.
      // NB: this calls into our attemptToMarkInflated() method, which will do the
      // Right Thing if the lock is biased to someone else.
      if (inflateAndLock(o, lockOffset)) {
        return;
      }
    } else {
      RVMThread.yield();
    }
  }
}
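// Illustration only: a simplified model of the thin-lock state machine that lock()
// walks above, using a java.util.concurrent AtomicLong in place of the object
// header. The bit layout (owner id in the low 16 bits, a count field above it, one
// status bit on top) is a hypothetical stand-in for the real thin-lock constants,
// and "inflate to a fat lock" is reduced to returning false.
import java.util.concurrent.atomic.AtomicLong;

public final class ThinLockSketch {
  static final long ID_MASK    = 0xFFFFL;         // owner thread id
  static final long COUNT_UNIT = 1L << 16;        // one recursive acquisition
  static final long COUNT_MASK = 0xFFL << 16;     // recursion count; a wrap forces inflation
  static final long STAT_THIN  = 0L << 24;
  static final long STAT_FAT   = 1L << 24;
  static final long STAT_MASK  = 1L << 24;

  final AtomicLong word = new AtomicLong(STAT_THIN);

  /** Returns true if the thin fast path succeeded; false means "fall back to a fat lock". */
  boolean thinLock(long threadId) {
    while (true) {
      long old = word.get();
      if ((old & STAT_MASK) == STAT_FAT) {
        return false;                              // already fat: contend on the heavy lock
      }
      long id = old & ID_MASK;
      if (id == 0) {                               // unowned: try to claim it
        if (word.compareAndSet(old, old | threadId)) return true;
      } else if (id == threadId) {                 // recursive acquire: bump the count
        long changed = old + COUNT_UNIT;
        if ((changed & COUNT_MASK) == 0) return false;   // count wrapped: inflate instead
        if (word.compareAndSet(old, changed)) return true;
      } else {
        return false;                              // held by someone else: inflate
      }
    }
  }

  public static void main(String[] args) {
    ThinLockSketch l = new ThinLockSketch();
    System.out.println(l.thinLock(7));             // true: thread 7 claims the lock
    System.out.println(l.thinLock(7));             // true: recursive acquire
    System.out.println(l.thinLock(9));             // false: contended, would inflate
  }
}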