static Address mapLimit(final int units, final int heads) { // final int WORD_SIZE = ArchitecturalWord.getModel().bytesInWord(); final int WORD_SIZE = 4; return baseAddress.plus( Math.floorDiv(((units + heads + 1) * WORD_SIZE * 2) + (PAGE_SIZE - 1), PAGE_SIZE) * PAGE_SIZE); }
/**
 * Store a value into a reference field of an object.
 *
 * @param object The object to store the field of.
 * @param index The field index.
 * @param value The value to store.
 */
public void storeReferenceField(ObjectReference object, int index, ObjectReference value) {
  final int refCount = ObjectModel.getRefs(object);
  if (Trace.isEnabled(Item.STORE) || ObjectModel.isWatched(object)) {
    Trace.printf(
        Item.STORE,
        "[%s].object[%d/%d] = %s",
        ObjectModel.getString(object),
        index,
        refCount,
        value.toString());
  }
  check(!object.isNull(), "Object can not be null");
  check(index >= 0, "Index must be non-negative");
  check(index < refCount, "Index " + index + " out of bounds " + refCount);

  final Address slot = ObjectModel.getRefSlot(object, index);
  if (ActivePlan.constraints.needsWriteBarrier()) {
    // The plan requires a write barrier: route the store through it.
    context.writeBarrier(object, slot, value, null, null, Plan.AASTORE_WRITE_BARRIER);
    if (gcEveryWB) {
      gc();
    }
  } else {
    slot.store(value);
  }
}
/**
 * Copy numBytes from src to dst. Assumption: either the ranges are non-overlapping, or src >= dst
 * + 4. Also, src and dst are 4 byte aligned and numBytes is a multiple of 4.
 *
 * @param dst the destination addr
 * @param src the source addr
 * @param numBytes the number of bytes to copy
 */
@Inline
public static void aligned32Copy(Address dst, Address src, Offset numBytes) {
  if (USE_NATIVE && numBytes.sGT(Offset.fromIntSignExtend(NATIVE_THRESHOLD))) {
    // Large copy: hand off to native memcopy.
    memcopy(dst, src, numBytes.toWord().toExtent());
  } else {
    if (VM.BuildFor64Addr) {
      Word wordMask = Word.one().lsh(LOG_BYTES_IN_ADDRESS).minus(Word.one());
      Word srcAlignment = src.toWord().and(wordMask);
      if (srcAlignment.EQ(dst.toWord().and(wordMask))) {
        // src and dst have the same word-alignment, so the bulk of the copy
        // can proceed a full word at a time.
        Offset i = Offset.zero();
        if (srcAlignment.EQ(Word.fromIntZeroExtend(BYTES_IN_INT))) {
          // Leading 32-bit fragment brings the cursor up to word alignment.
          dst.store(src.loadInt(i), i);
          i = i.plus(BYTES_IN_INT);
        }
        Word endAlignment =
            srcAlignment.plus(numBytes).and(Word.fromIntSignExtend(BYTES_IN_ADDRESS - 1));
        // Trim any trailing 32-bit fragment off the word-copy range.
        numBytes = numBytes.minus(endAlignment.toOffset());
        for (; i.sLT(numBytes); i = i.plus(BYTES_IN_ADDRESS)) {
          dst.store(src.loadWord(i), i);
        }
        if (!endAlignment.isZero()) {
          // Copy the trailing 32-bit fragment.
          dst.store(src.loadInt(i), i);
        }
        return;
      }
    }
    // normal case: 32 bit or (64 bit not aligned)
    for (Offset i = Offset.zero(); i.sLT(numBytes); i = i.plus(BYTES_IN_INT)) {
      dst.store(src.loadInt(i), i);
    }
  }
}
/**
 * Read the referent field, performing any read-barrier actions required by the current
 * configuration before returning it as an object.
 *
 * @return the referent object, or {@code null} if the raw referent address is zero
 */
@Uninterruptible
@Inline
Object getInternal() {
  if (RVMType.JavaLangRefReferenceReferenceField.madeTraced()) {
    // The referent field is traced as an ordinary object field: use the
    // regular object-field read path (barriered if the GC needs it).
    if (NEEDS_OBJECT_GETFIELD_BARRIER) {
      return Barriers.objectFieldRead(
          this,
          RVMType.JavaLangRefReferenceReferenceField.getOffset(),
          RVMType.JavaLangRefReferenceReferenceField.getId());
    } else {
      return Magic.getObjectAtOffset(
          this,
          RVMType.JavaLangRefReferenceReferenceField.getOffset(),
          RVMType.JavaLangRefReferenceReferenceField.getId());
    }
  } else {
    // Untraced case: the referent is held as a raw address in _referent.
    Address tmp = _referent;
    if (tmp.isZero()) {
      return null;
    } else {
      Object ref = Magic.addressAsObject(tmp);
      if (Barriers.NEEDS_JAVA_LANG_REFERENCE_READ_BARRIER) {
        ref = Barriers.javaLangReferenceReadBarrier(ref);
      }
      return ref;
    }
  }
}
/** Scan all reference fields of a scalar (non-array) object. */
public static void scalar(Object object, TransitiveClosure trace) {
  final Address base = Magic.objectAsAddress(object);
  // Visit one edge per reference offset recorded for the object's class.
  for (int fieldOffset :
      ObjectModel.getObjectType(object).asClass().getReferenceOffsets()) {
    trace.processEdge(ObjectReference.fromObject(object), base.plus(fieldOffset));
  }
}
/**
 * Advance {@code recyclableBlock} to the next partially-used block that may be reused for
 * allocation, acquiring fresh reusable blocks from the space as needed.
 *
 * @return {@code true} if a usable recyclable block was acquired and marked as reused,
 *     {@code false} once the supply of recyclable blocks is exhausted
 */
@Inline
private boolean acquireRecyclableBlockAddressOrder() {
  if (recyclableExhausted) {
    if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
      Log.writeln("[no recyclable available]");
    }
    return false;
  }
  int markState = 0;
  boolean usable = false;
  while (!usable) {
    Address next = recyclableBlock.plus(BYTES_IN_BLOCK);
    if (recyclableBlock.isZero() || ImmixSpace.isRecycleAllocChunkAligned(next)) {
      // First call, or we ran off the end of the current chunk: get more blocks.
      recyclableBlock = space.acquireReusableBlocks();
      if (recyclableBlock.isZero()) {
        recyclableExhausted = true;
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.writeln("[recyclable exhausted]");
        }
        line = LINES_IN_BLOCK;
        return false;
      }
    } else {
      recyclableBlock = next;
    }
    markState = Block.getBlockMarkState(recyclableBlock);
    // A block is usable when it is partially marked but under the reuse threshold;
    // when copying, blocks selected as defrag sources must not be reused.
    usable = (markState > 0 && markState <= ImmixSpace.getReusuableMarkStateThreshold(copy));
    if (copy && Block.isDefragSource(recyclableBlock)) usable = false;
  }
  if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!Block.isUnused(recyclableBlock));
  Block.setBlockAsReused(recyclableBlock);

  lineUseCount += (LINES_IN_BLOCK - markState);
  return true; // found something good
}
/** Scan all elements of a reference array. */
public static void referenceArray(Object object, TransitiveClosure trace) {
  final Address base = Magic.objectAsAddress(object);
  final int numElements = ObjectModel.getArrayLength(object);
  // Each element slot is one address wide.
  for (int idx = 0; idx < numElements; idx++) {
    trace.processEdge(
        ObjectReference.fromObject(object), base.plus(idx << LOG_BYTES_IN_ADDRESS));
  }
}
/**
 * Test whether a memory range is set to a given integer value
 *
 * @param start The address to start checking at
 * @param bytes The size of the region to check, in bytes
 * @param verbose If true, produce verbose output
 * @param value The value to which the memory should be set
 * @return true if every int-sized slot in the range holds {@code value}
 */
private static boolean isSet(Address start, int bytes, boolean verbose, int value)
    /* Inlining this loop into the uninterruptible code can
     * cause/encourage the GCP into moving a get_obj_tib into the
     * interruptible region where the tib is being installed via an
     * int_store */
    throws NoInlinePragma {
  if (Assert.VERIFY_ASSERTIONS) assertAligned(bytes);
  for (int i = 0; i < bytes; i += BYTES_IN_INT)
    if (start.loadInt(Offset.fromInt(i)) != value) {
      if (verbose) {
        Log.prependThreadId();
        Log.write("Memory range does not contain only value ");
        Log.writeln(value);
        Log.write("Non-zero range: ");
        Log.write(start);
        Log.write(" .. ");
        Log.writeln(start.add(bytes));
        Log.write("First bad value at ");
        Log.writeln(start.add(i));
        dumpMemory(start, 0, bytes);
      }
      return false;
    }
  return true;
}
/**
 * Push an object's address onto the address stack.
 *
 * @param object the object to be pushed onto the object queue; must not be null
 */
@Inline
public final void push(ObjectReference object) {
  final Address entry = object.toAddress();
  if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!entry.isZero());
  checkHeadInsert(1);
  uncheckedHeadInsert(entry);
}
/**
 * Set a region of memory to a given integer value.
 *
 * @param start The start of the region to be set (must be 4-byte aligned)
 * @param bytes The number of bytes to be set (must be 4-byte aligned)
 * @param value The value to which the integers in the region should be set
 */
public static void set(Address start, int bytes, int value) throws InlinePragma {
  if (Assert.VERIFY_ASSERTIONS) {
    assertAligned(start);
    assertAligned(bytes);
  }
  final Address sentinel = start.add(bytes);
  Address cursor = start;
  while (cursor.LT(sentinel)) {
    cursor.store(value);
    cursor = cursor.add(BYTES_IN_INT);
  }
}
/**
 * Zero a small region of memory.
 *
 * @param start The start of the region to be zeroed (must be 4-byte aligned)
 * @param bytes The number of bytes to be zeroed (must be 4-byte aligned)
 */
public static void zeroSmall(Address start, Extent bytes) throws InlinePragma {
  if (Assert.VERIFY_ASSERTIONS) {
    assertAligned(start);
    assertAligned(bytes);
  }
  final Address sentinel = start.add(bytes);
  Address cursor = start;
  while (cursor.LT(sentinel)) {
    cursor.store(0);
    cursor = cursor.add(BYTES_IN_INT);
  }
}
/**
 * Given a slot (i.e. the address of an ObjectReference), ensure that the referent will not
 * move for the rest of the GC by precopying it, then update the slot with the (possibly new)
 * referent.
 *
 * @param slot The slot to check
 */
@Inline
public final void processPrecopyEdge(Address slot) {
  final ObjectReference referent = slot.loadObjectReference();
  if (referent.isNull()) {
    return; // nothing to pin
  }
  slot.store(precopyObject(referent));
}
/** Called after we've successfully loaded the shared library */ private void callOnLoad() { // Run any JNI_OnLoad functions defined within the library Address JNI_OnLoadAddress = getSymbol("JNI_OnLoad"); if (!JNI_OnLoadAddress.isZero()) { int version = runJNI_OnLoad(JNI_OnLoadAddress); checkJNIVersion(version); } }
/**
 * Resolve a symbol to an address in a currently loaded dynamic library.
 *
 * @param symbol the symbol name to look up
 * @return the address of the symbol, or Address.zero() if it cannot be resolved
 */
public static synchronized Address resolveSymbol(String symbol) {
  for (VM_DynamicLibrary library : dynamicLibraries.values()) {
    final Address candidate = library.getSymbol(symbol);
    if (!candidate.isZero()) {
      return candidate;
    }
  }
  return Address.zero();
}
/**
 * Sweep through the large pages, releasing all superpages on the "from space" treadmill.
 *
 * @param sweepNursery if true sweep the nursery treadmill, otherwise the mature one
 */
private void sweepLargePages(boolean sweepNursery) {
  for (;;) {
    final Address cell = sweepNursery ? treadmill.popNursery() : treadmill.pop();
    if (cell.isZero()) {
      break; // treadmill drained
    }
    release(getSuperPage(cell));
  }
  if (VM.VERIFY_ASSERTIONS) {
    VM.assertions._assert(
        sweepNursery ? treadmill.nurseryEmpty() : treadmill.fromSpaceEmpty());
  }
}
/**
 * Store a value into the data field of an object.
 *
 * @param object The object to store the field of.
 * @param index The field index.
 * @param value The value to store.
 */
public void storeDataField(ObjectReference object, int index, int value) {
  final int slotCount = ObjectModel.getDataCount(object);
  check(!object.isNull(), "Object can not be null");
  check(index >= 0, "Index must be non-negative");
  check(index < slotCount, "Index " + index + " out of bounds " + slotCount);

  final Address slot = ObjectModel.getDataSlot(object, index);
  slot.store(value);
  Trace.trace(Item.STORE, "%s.[%d] = %d", object.toString(), index, value);
}
/**
 * Demand zero mmaps an area of virtual memory.
 *
 * @param start the address of the start of the area to be mapped
 * @param size the size, in bytes, of the area to be mapped
 * @return 0 if successful, otherwise the system errno
 */
public final int dzmmap(Address start, int size) {
  final Address mapped =
      org.jikesrvm.runtime.Memory.dzmmap(start, Extent.fromIntZeroExtend(size));
  if (mapped.EQ(start)) {
    return 0; // mapped exactly where requested
  }
  // A result above the small errno range means mmap ignored MAP_FIXED
  // and mapped somewhere else entirely — that is a fatal configuration error.
  if (mapped.GT(Address.fromIntZeroExtend(127))) {
    VM.sysWrite("demand zero mmap with MAP_FIXED on ", start);
    VM.sysWriteln(" returned some other address", mapped);
    VM.sysFail("mmap with MAP_FIXED has unexpected behavior");
  }
  return mapped.toInt();
}
/**
 * Allocate the transmit frame descriptor buffer for the EEPRO100 driver and wrap it as a
 * memory resource.
 *
 * @param rm the resource manager used to expose the byte array as a memory resource
 */
public EEPRO100TxFD(ResourceManager rm) {
  // Create a large enough buffer
  final int size = (TxFDSize + DataBufferSize) + 16 /* alignment */;
  this.data = new byte[size];
  this.mem = rm.asMemoryResource(data);
  final Address memAddr = mem.getAddress();
  // NOTE(review): 16 bytes of alignment slack are allocated above, but the
  // descriptor offset is simply 0 — confirm whether memAddr is already
  // suitably aligned or whether this offset should round up to the alignment.
  this.firstDPDOffset = 0;
  this.firstDPDAddress = memAddr.add(firstDPDOffset);
}
/**
 * Load and return the value of a data field of an object.
 *
 * @param object The object to load the field of.
 * @param index The field index.
 * @return the int value held in the indexed data slot
 */
public int loadDataField(ObjectReference object, int index) {
  int limit = ObjectModel.getDataCount(object);
  check(!object.isNull(), "Object can not be null");
  check(index >= 0, "Index must be non-negative");
  check(index < limit, "Index " + index + " out of bounds " + limit);

  Address dataSlot = ObjectModel.getDataSlot(object, index);
  int result = dataSlot.loadInt();
  Trace.trace(
      Item.LOAD, "[%s].int[%d] returned [%d]", ObjectModel.getString(object), index, result);
  return result;
}
/** * Low level copy of <code>copyBytes</code> bytes from <code>src[srcPos]</code> to <code> * dst[dstPos]</code>. * * <p>Assumption <code>src != dst || (srcPos >= dstPos)</code> and element size is 8 bytes. * * @param dstPtr The destination start address * @param srcPtr The source start address * @param copyBytes The number of bytes to be copied */ public static void aligned64Copy(Address dstPtr, Address srcPtr, int copyBytes) { if (USE_NATIVE && copyBytes > NATIVE_THRESHOLD) { memcopy(dstPtr, srcPtr, copyBytes); } else { // The elements of long[] and double[] are always doubleword aligned // therefore we can do 64 bit load/stores without worrying about alignment. Address endPtr = srcPtr.plus(copyBytes); while (srcPtr.LT(endPtr)) { copy8Bytes(dstPtr, srcPtr); srcPtr = srcPtr.plus(8); dstPtr = dstPtr.plus(8); } } }
// return (address of) next ref in the current "frame" on the // threads JNIEnvironment stack of refs // When at the end of the current frame, update register locations to point // to the non-volatile registers saved in the JNI transition frame. // public Address getNextReferenceAddress() { // first report jni refs in the current frame in the jniRef side stack // until all in the frame are reported // if (jniNextRef > jniFramePtr) { Address ref_address = Magic.objectAsAddress(jniRefs).plus(jniNextRef); jniNextRef -= BYTES_IN_ADDRESS; return ref_address; } // no more refs to report, before returning 0, setup for processing // the next jni frame, if any // jniNextRef -> savedFramePtr for another "frame" of refs for another // sequence of Native C frames lower in the stack, or to 0 if this is the // last jni frame in the JNIRefs stack. If more frames, initialize for a // later scan of those refs. // if (jniFramePtr > 0) { jniFramePtr = jniRefs.get(jniFramePtr >> LOG_BYTES_IN_ADDRESS).toInt(); jniNextRef = jniNextRef - BYTES_IN_ADDRESS; } // set register locations for non-volatiles to point to registers saved in // the JNI transition frame at a fixed negative offset from the callers FP. // the save non-volatiles are EBX EBP and EDI. // registerLocations.set(EDI.value(), framePtr.plus(JNICompiler.EDI_SAVE_OFFSET)); registerLocations.set(EBX.value(), framePtr.plus(JNICompiler.EBX_SAVE_OFFSET)); registerLocations.set(EBP.value(), framePtr.plus(JNICompiler.EBP_SAVE_OFFSET)); return Address.zero(); // no more refs to report }
private void zeroBlock(Address block) { // FIXME: efficiency check here! if (VM.VERIFY_ASSERTIONS) VM.assertions._assert( block.toWord().and(Word.fromIntSignExtend(BYTES_IN_BLOCK - 1)).isZero()); VM.memory.zero(block, Extent.fromIntZeroExtend(BYTES_IN_BLOCK)); }
/**
 * Dump a window of memory around {@code start}, one word per line.
 *
 * @param start the focal address of the dump
 * @param beforeBytes bytes to show before {@code start} (aligned down to a word)
 * @param afterBytes bytes to show after {@code start} (aligned up to a word)
 */
public static void dumpMemory(Address start, int beforeBytes, int afterBytes) {
  beforeBytes = alignDown(beforeBytes, BYTES_IN_ADDRESS);
  afterBytes = alignUp(afterBytes, BYTES_IN_ADDRESS);
  VM.sysWrite("---- Dumping memory from ");
  VM.sysWrite(start.minus(beforeBytes));
  VM.sysWrite(" to ");
  VM.sysWrite(start.plus(afterBytes));
  VM.sysWrite(" ----\n");
  for (int offset = -beforeBytes; offset < afterBytes; offset += BYTES_IN_ADDRESS) {
    final Address slot = start.plus(offset);
    VM.sysWrite(offset, ": ");
    VM.sysWrite(slot);
    VM.sysWriteln(" ", slot.loadWord());
  }
}
/**
 * Gets the compiled code of a given stackframe.
 *
 * @param sf Stackframe pointer
 * @return The compiled code, or null when the frame holds no compiled-code id
 */
final VmCompiledCode getCompiledCode(Address sf) {
  final int ccid = sf.loadInt(getMethodIdOffset(sf));
  return (ccid == 0) ? null : VmUtils.getVm().getCompiledMethods().get(ccid);
}
/**
 * Round an address up to the next multiple of {@code alignment} (a power of two).
 *
 * @param address the address to align
 * @param alignment the power-of-two alignment
 * @return the smallest aligned address >= {@code address}
 */
@Inline
public static Address alignUp(Address address, int alignment) {
  // Add (alignment - 1) then clear the low bits.
  final Word mask = Word.fromIntSignExtend(~(alignment - 1));
  return address.plus(alignment - 1).toWord().and(mask).toAddress();
}
/**
 * Pop an address from the address stack, return zero if the stack is empty.
 *
 * @return The next address in the address stack, or zero if the stack is empty
 */
@Inline
public final Address pop() {
  return checkDequeue(1) ? uncheckedDequeue() : Address.zero();
}
/**
 * Allocate space for a new object. This is frequently executed code and the coding is
 * deliberately sensitive to the optimizing compiler. After changing this, always check the
 * IR/MC that is generated.
 *
 * @param bytes The number of bytes allocated
 * @param align The requested alignment
 * @param offset The offset from the alignment
 * @return The address of the first byte of the allocated region
 */
@Inline
public final Address alloc(int bytes, int align, int offset) {
  /* establish how much we need */
  Address start = alignAllocationNoFill(cursor, align, offset);
  Address end = start.plus(bytes);

  /* check whether we've exceeded the limit */
  if (end.GT(limit)) {
    // Exceeded the limit: large requests go to the overflow allocator,
    // smaller ones take the slow path to acquire more space.
    if (bytes > BYTES_IN_LINE) return overflowAlloc(bytes, align, offset);
    else return allocSlowHot(bytes, align, offset);
  }

  /* sufficient memory is available, so we can finish performing the allocation */
  fillAlignmentGap(cursor, start);
  cursor = end;
  return start;
}
/**
 * Gets the previous frame (if any)
 *
 * @param sf The stackframe to get the previous frame from.
 * @return The previous frame or null.
 */
@KernelSpace
@Internal
public final Address getPrevious(Address sf) {
  if (!isValid(sf)) {
    return null; // no previous frame
  }
  return sf.loadAddress(getPreviousOffset(sf));
}
/** * Trace a reference during GC. This involves determining which collection policy applies and * calling the appropriate <code>trace</code> method. * * @param target The object the interior edge points within. * @param slot The location of the interior edge. * @param root True if this is a root edge. */ public final void processInteriorEdge(ObjectReference target, Address slot, boolean root) { Address interiorRef = slot.loadAddress(); Offset offset = interiorRef.diff(target.toAddress()); ObjectReference newTarget = traceObject(target, root); if (VM.VERIFY_ASSERTIONS) { if (offset.sLT(Offset.zero()) || offset.sGT(Offset.fromIntSignExtend(1 << 24))) { // There is probably no object this large Log.writeln("ERROR: Suspiciously large delta to interior pointer"); Log.write(" object base = "); Log.writeln(target); Log.write(" interior reference = "); Log.writeln(interiorRef); Log.write(" delta = "); Log.writeln(offset); VM.assertions._assert(false); } } slot.store(newTarget.toAddress().plus(offset)); }
/**
 * Allocate space for a new object via the overflow (large) allocator. This is frequently
 * executed code and the coding is deliberately sensitive to the optimizing compiler. After
 * changing this, always check the IR/MC that is generated.
 *
 * @param bytes The number of bytes allocated
 * @param align The requested alignment
 * @param offset The offset from the alignment
 * @return The address of the first byte of the allocated region
 */
public final Address overflowAlloc(int bytes, int align, int offset) {
  /* establish how much we need */
  Address start = alignAllocationNoFill(largeCursor, align, offset);
  Address end = start.plus(bytes);

  /* check whether we've exceeded the limit */
  if (end.GT(largeLimit)) {
    // Slow path: flag the request as large so the slow-path allocator
    // acquires appropriately-sized space, then clear the flag.
    requestForLarge = true;
    Address rtn = allocSlowInline(bytes, align, offset);
    requestForLarge = false;
    return rtn;
  }

  /* sufficient memory is available, so we can finish performing the allocation */
  fillAlignmentGap(largeCursor, start);
  largeCursor = end;
  return start;
}