/**
 * Notify that the concurrent phase has completed successfully. This must only be called by a
 * single thread after it has determined that the phase has been completed successfully.
 */
public static void notifyConcurrentPhaseComplete() {
  if (Options.verbose.getValue() >= 2) {
    Log.write("< Concurrent phase ");
    Log.write(getName(concurrentPhaseId));
    Log.writeln(" complete >");
  }
  /* Concurrent phase is complete */
  concurrentPhaseId = 0;
  /* Remove it from the stack */
  popScheduledPhase();
  /* Pop the next phase off the stack */
  int nextScheduledPhase = getNextPhase();
  if (nextScheduledPhase > 0) {
    short schedule = getSchedule(nextScheduledPhase);
    /* A concurrent phase, lets wake up and do it all again */
    if (schedule == SCHEDULE_CONCURRENT) {
      concurrentPhaseId = getPhaseId(nextScheduledPhase);
      scheduleConcurrentWorkers();
      return;
    }
    /* Push phase back on and resume atomic collection */
    pushScheduledPhase(nextScheduledPhase);
    VM.collection.triggerAsyncCollection(Collection.INTERNAL_PHASE_GC_TRIGGER);
  }
}
@Inline
private boolean acquireRecyclableBlockAddressOrder() {
  if (recyclableExhausted) {
    if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
      Log.writeln("[no recyclable available]");
    }
    return false;
  }
  int markState = 0;
  boolean usable = false;
  while (!usable) {
    Address next = recyclableBlock.plus(BYTES_IN_BLOCK);
    if (recyclableBlock.isZero() || ImmixSpace.isRecycleAllocChunkAligned(next)) {
      recyclableBlock = space.acquireReusableBlocks();
      if (recyclableBlock.isZero()) {
        recyclableExhausted = true;
        if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
          Log.writeln("[recyclable exhausted]");
        }
        line = LINES_IN_BLOCK;
        return false;
      }
    } else {
      recyclableBlock = next;
    }
    markState = Block.getBlockMarkState(recyclableBlock);
    usable = (markState > 0 && markState <= ImmixSpace.getReusuableMarkStateThreshold(copy));
    if (copy && Block.isDefragSource(recyclableBlock)) usable = false;
  }
  if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!Block.isUnused(recyclableBlock));
  Block.setBlockAsReused(recyclableBlock);

  lineUseCount += (LINES_IN_BLOCK - markState);
  return true; // found something good
}
/** Notify that the concurrent phase has not finished, and must be re-attempted. */
public static void notifyConcurrentPhaseIncomplete() {
  if (Options.verbose.getValue() >= 2) {
    Log.write("< Concurrent phase ");
    Log.write(getName(concurrentPhaseId));
    Log.writeln(" incomplete >");
  }
  scheduleConcurrentWorkers();
}
/**
 * Log a message, prepended with the thread id, if the verbosity level is greater
 * than or equal to the passed level.
 *
 * @param minVerbose The required verbosity level
 * @param message The message to display
 */
@Inline
protected final void logMessage(int minVerbose, String message) {
  if (Options.verbose.getValue() >= minVerbose) {
    Log.prependThreadId();
    Log.write(" ");
    Log.writeln(message);
  }
}
/**
 * Log a message from within 'poll'.
 *
 * @param space The space that was polled
 * @param message The message to log
 */
private void logPoll(Space space, String message) {
  if (Options.verbose.getValue() >= 3) {
    Log.write(" [POLL] ");
    Log.write(space.getName());
    Log.write(": ");
    Log.writeln(message);
  }
}
/**
 * Send the data and summary for this stream.
 *
 * @param event The event
 * @param numTiles The number of tiles to send (which may be less than maxTileNum)
 */
public void send(int event, int numTiles) {
  if (DEBUG) {
    Log.write("sending ");
    Log.write(numTiles);
    Log.writeln(" int values");
  }

  serverSpace.stream(streamId, numTiles);
  for (int index = 0; index < numTiles; index++)
    serverSpace.streamIntValue(data[index]);
  serverSpace.streamEnd();
  sendSummary();
}
/**
 * The VM is about to exit. Perform any clean up operations.
 *
 * @param value The exit value
 */
@Interruptible
public void notifyExit(int value) {
  if (Options.harnessAll.getValue()) harnessEnd();
  if (Options.verbose.getValue() == 1) {
    Log.write("[End ");
    totalTime.printTotalSecs();
    Log.writeln(" s]");
  } else if (Options.verbose.getValue() == 2) {
    Log.write("[End ");
    totalTime.printTotalMillis();
    Log.writeln(" ms]");
  }
  if (Options.verboseTiming.getValue()) printDetailedTiming(true);
}
/**
 * Is the specified object live?
 *
 * @param object The object.
 * @return True if the object is live.
 */
@Inline
public boolean isLive(ObjectReference object) {
  Space space = Space.getSpaceForObject(object);
  if (space == Plan.loSpace)
    return Plan.loSpace.isLive(object);
  else if (space == Plan.ploSpace)
    return Plan.ploSpace.isLive(object);
  else if (Plan.USE_CODE_SPACE && space == Plan.smallCodeSpace)
    return Plan.smallCodeSpace.isLive(object);
  else if (Plan.USE_CODE_SPACE && space == Plan.largeCodeSpace)
    return Plan.largeCodeSpace.isLive(object);
  else if (space == null) {
    if (VM.VERIFY_ASSERTIONS) {
      Log.write("space failure: ");
      Log.writeln(object);
    }
  }
  return true;
}
/**
 * Notify that the current thread believes that a concurrent collection phase is complete.
 *
 * @return True if this was the last thread.
 */
public static boolean completeConcurrentPhase() {
  boolean result = false;
  concurrentWorkersLock.acquire();
  if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(allowConcurrentWorkersActive);
  concurrentWorkersActive--;
  if (concurrentWorkersActive == 0) {
    allowConcurrentWorkersActive = false;
    result = true;
  }
  if (Options.verbose.getValue() >= 3) {
    Log.write("< Concurrent worker ");
    Log.write(concurrentWorkersActive);
    Log.write(" completed phase ");
    Log.write(getName(concurrentPhaseId));
    Log.writeln(" >");
  }
  concurrentWorkersLock.release();
  return result;
}
/**
 * Perform some concurrent collection work.
 *
 * @param phaseId The unique phase identifier
 */
@Unpreemptible
public void concurrentCollectionPhase(short phaseId) {
  if (phaseId == Concurrent.CONCURRENT_CLOSURE) {
    if (VM.VERIFY_ASSERTIONS) {
      VM.assertions._assert(!Plan.gcInProgress());
    }
    TraceLocal trace = getCurrentTrace();
    while (!trace.incrementalTrace(100)) {
      if (group.isAborted()) {
        trace.flush();
        break;
      }
    }
    if (rendezvous() == 0) {
      continueCollecting = false;
      if (!group.isAborted()) {
        /* We are responsible for ensuring termination. */
        if (Options.verbose.getValue() >= 2) Log.writeln("< requesting mutator flush >");
        VM.collection.requestMutatorFlush();
        if (Options.verbose.getValue() >= 2) Log.writeln("< mutators flushed >");
        if (concurrentTraceComplete()) {
          /* notifyConcurrentPhaseComplete() schedules any follow-on work itself,
           * so this worker stops collecting (continueCollecting remains false). */
          Phase.notifyConcurrentPhaseComplete();
        } else {
          continueCollecting = true;
          Phase.notifyConcurrentPhaseIncomplete();
        }
      }
    }
    rendezvous();
    return;
  }

  Log.write("Concurrent phase ");
  Log.write(Phase.getName(phaseId));
  Log.writeln(" not handled.");
  VM.assertions.fail("Concurrent phase not handled!");
}
/**
 * Process an interior edge during GC. This involves tracing the object that contains
 * the edge (determining which collection policy applies) and updating the interior
 * pointer so that it retains the same offset within the (possibly forwarded) object.
 *
 * @param target The object the interior edge points within.
 * @param slot The location of the interior edge.
 * @param root True if this is a root edge.
 */
public final void processInteriorEdge(ObjectReference target, Address slot, boolean root) {
  Address interiorRef = slot.loadAddress();
  Offset offset = interiorRef.diff(target.toAddress());
  ObjectReference newTarget = traceObject(target, root);
  if (VM.VERIFY_ASSERTIONS) {
    if (offset.sLT(Offset.zero()) || offset.sGT(Offset.fromIntSignExtend(1 << 24))) {
      // There is probably no object this large
      Log.writeln("ERROR: Suspiciously large delta to interior pointer");
      Log.write(" object base = "); Log.writeln(target);
      Log.write(" interior reference = "); Log.writeln(interiorRef);
      Log.write(" delta = "); Log.writeln(offset);
      VM.assertions._assert(false);
    }
  }
  slot.store(newTarget.toAddress().plus(offset));
}
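/*
 * Illustrative sketch (not part of the code above; all names are hypothetical): the essence
 * of processInteriorEdge is that an interior pointer is rebased so it keeps the same offset
 * within the forwarded copy of its containing object. Assuming plain integer addresses, the
 * arithmetic looks like this:
 */
final class InteriorEdgeSketch {
  /** Rebase an interior pointer against the new base of its (moved) containing object. */
  static long rebaseInteriorPointer(long oldBase, long newBase, long interiorRef) {
    long delta = interiorRef - oldBase; // offset of the edge within the original object
    return newBase + delta;             // same offset within the forwarded object
  }
}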
/**
 * Attempt to begin execution of a concurrent collection phase.
 *
 * @return True if this worker was registered and may execute the concurrent phase.
 */
public static boolean startConcurrentPhase() {
  boolean result = false;
  concurrentWorkersLock.acquire();
  if (concurrentPhaseActive()) {
    if (allowConcurrentWorkersActive) {
      concurrentWorkersActive++;
      result = true;
    }
    VM.activePlan.collector().clearResetConcurrentWork();
  }
  if (Options.verbose.getValue() >= 2) {
    if (result) {
      Log.write("< Concurrent worker ");
      Log.write(concurrentWorkersActive - 1);
      Log.write(" started phase ");
      Log.write(getName(concurrentPhaseId));
      Log.writeln(" >");
    } else {
      Log.writeln("< worker failed in attempt to start phase >");
    }
  }
  concurrentWorkersLock.release();
  return result;
}
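/*
 * Minimal sketch (hypothetical, not MMTk code) of the handshake implemented by
 * startConcurrentPhase / completeConcurrentPhase above: workers register and deregister
 * under a lock, and only the last worker to finish reports completion, so exactly one
 * thread goes on to notify the phase machinery.
 */
final class ConcurrentWorkerCountSketch {
  private int active = 0;        // workers currently executing the concurrent phase
  private boolean open = true;   // whether new workers may still join

  synchronized boolean start() {
    if (!open) return false;     // phase is winding down; refuse late starters
    active++;
    return true;
  }

  synchronized boolean complete() {
    active--;
    if (active == 0) {           // last worker out closes the phase
      open = false;
      return true;               // caller should notify phase completion
    }
    return false;
  }
}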
/**
 * Process the phase stack. This method is called by multiple threads.
 *
 * @param resume True if we are resuming a phase stack that was paused for a
 *        concurrent phase, false if we are starting from the top.
 * @return True if the phase stack was fully processed, false if we yielded to a
 *         concurrent collection phase.
 */
private static boolean processPhaseStack(boolean resume) {
  int order = VM.collection.rendezvous(1001);
  final boolean primary = order == 1;

  boolean log = Options.verbose.getValue() >= 6;
  boolean logDetails = Options.verbose.getValue() >= 7;

  if (primary && resume) {
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!Phase.isPhaseStackEmpty());
    if (VM.VERIFY_ASSERTIONS) VM.assertions._assert(!Plan.gcInProgress());
    Plan.setGCStatus(Plan.GC_PROPER);
  }

  /* In order to reduce the need for synchronization, we keep an odd or even
   * counter for the number of phases processed. As each phase has a single
   * rendezvous it is only possible to be out by one so the odd or even counter
   * protects us. */
  boolean isEvenPhase = true;

  if (primary) {
    /* First phase will be even, so we say we are odd here so that the next phase set is even */
    setNextPhase(false, getNextPhase(), false);
  }

  /* Make sure everyone sees the first phase */
  VM.collection.rendezvous(1002);

  /* Global and Collector instances used in phases */
  Plan plan = VM.activePlan.global();
  CollectorContext collector = VM.activePlan.collector();

  /* The main phase execution loop */
  int scheduledPhase;
  while ((scheduledPhase = getCurrentPhase(isEvenPhase)) > 0) {
    short schedule = getSchedule(scheduledPhase);
    short phaseId = getPhaseId(scheduledPhase);
    Phase p = getPhase(phaseId);

    /* Start the timer(s) */
    if (primary) {
      if (resume) {
        resumeComplexTimers();
      }
      if (p.timer != null) p.timer.start();
      if (startComplexTimer > 0) {
        Phase.getPhase(startComplexTimer).timer.start();
        startComplexTimer = 0;
      }
    }

    if (log) {
      Log.write("Execute ");
      p.logPhase();
    }

    /* Execute a single simple scheduled phase */
    switch (schedule) {
      /* Global phase */
      case SCHEDULE_GLOBAL: {
        if (logDetails) Log.writeln(" as Global...");
        if (primary) plan.collectionPhase(phaseId);
        break;
      }

      /* Collector phase */
      case SCHEDULE_COLLECTOR: {
        if (logDetails) Log.writeln(" as Collector...");
        collector.collectionPhase(phaseId, primary);
        break;
      }

      /* Mutator phase */
      case SCHEDULE_MUTATOR: {
        if (logDetails) Log.writeln(" as Mutator...");
        /* Iterate through all mutator contexts */
        MutatorContext mutator;
        while ((mutator = VM.activePlan.getNextMutator()) != null) {
          mutator.collectionPhase(phaseId, primary);
        }
        break;
      }

      /* Concurrent phase */
      case SCHEDULE_CONCURRENT: {
        /* We are yielding to a concurrent collection phase */
        if (logDetails) Log.writeln(" as Concurrent, yielding...");
        if (primary) {
          concurrentPhaseId = phaseId;
          scheduleConcurrentWorkers();
          /* Concurrent phase, we need to stop gc */
          Plan.setGCStatus(Plan.NOT_IN_GC);
        }
        VM.collection.rendezvous(1003);
        if (primary) {
          pauseComplexTimers();
        }
        return false;
      }

      default: {
        /* getNextPhase has done the wrong thing */
        VM.assertions.fail("Invalid schedule in Phase.processPhaseStack");
        break;
      }
    }

    if (primary) {
      /* Set the next phase by processing the stack */
      int next = getNextPhase();
      boolean needsResetRendezvous = (next > 0) &&
          (schedule == SCHEDULE_MUTATOR && getSchedule(next) == SCHEDULE_MUTATOR);
      setNextPhase(isEvenPhase, next, needsResetRendezvous);
    }

    /* Sync point after execution of a phase */
    VM.collection.rendezvous(1004);

    /* Mutator phase reset */
    if (primary && schedule == SCHEDULE_MUTATOR) {
      VM.activePlan.resetMutatorIterator();
    }

    /* At this point, in the case of consecutive phases with mutator
     * scheduling, we have to double-synchronize to ensure all
     * collector threads see the reset mutator counter. */
    if (needsMutatorResetRendezvous(isEvenPhase)) {
      VM.collection.rendezvous(1005);
    }

    /* Stop the timer(s) */
    if (primary) {
      if (p.timer != null) p.timer.stop();
      if (stopComplexTimer > 0) {
        Phase.getPhase(stopComplexTimer).timer.stop();
        stopComplexTimer = 0;
      }
    }

    /* Flip the even / odd phase sense */
    isEvenPhase = !isEvenPhase;
    resume = false;
  }

  /* Phase stack exhausted so we return true */
  return true;
}
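/*
 * Sketch (hypothetical names, not the real Phase fields) of the even/odd phase-sense trick
 * used in processPhaseStack: the primary thread publishes the next scheduled phase into the
 * slot for the *opposite* sense, and every thread flips its sense only after the per-phase
 * rendezvous, so readers of one slot never race with the writer of the other.
 */
final class PhaseSenseSketch {
  private static volatile int evenScheduledSlot; // read by threads whose sense is "even"
  private static volatile int oddScheduledSlot;  // read by threads whose sense is "odd"

  static int getCurrentPhase(boolean isEvenPhase) {
    return isEvenPhase ? evenScheduledSlot : oddScheduledSlot;
  }

  /** Publish the next phase into the slot that the following iteration will read. */
  static void setNextPhase(boolean isEvenPhase, int scheduledPhase) {
    if (isEvenPhase)
      oddScheduledSlot = scheduledPhase;
    else
      evenScheduledSlot = scheduledPhase;
  }
}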
public final void printUsedPages() {
  Log.write("reserved = ");
  Log.write(Conversions.pagesToMBytes(getPagesReserved()));
  Log.write(" MB (");
  Log.write(getPagesReserved());
  Log.write(" pgs)");
  Log.write(" total = ");
  Log.write(Conversions.pagesToMBytes(getTotalPages()));
  Log.write(" MB (");
  Log.write(getTotalPages());
  Log.write(" pgs)");
  Log.writeln();
}
/** Print out statistics at the end of a GC */
public final void printPostStats() {
  if ((Options.verbose.getValue() == 1) || (Options.verbose.getValue() == 2)) {
    Log.write("-> ");
    Log.writeDec(Conversions.pagesToBytes(getPagesUsed()).toWord().rshl(10));
    Log.write("KB ");
    if (Options.verbose.getValue() == 1) {
      totalTime.printLast();
      Log.writeln(" ms]");
    } else {
      Log.write("End ");
      totalTime.printTotal();
      Log.writeln(" ms]");
    }
  }
  if (Options.verbose.getValue() > 2) {
    Log.write(" After Collection: ");
    Space.printUsageMB();
    if (Options.verbose.getValue() >= 4) {
      Log.write(" ");
      Space.printUsagePages();
    }
    if (Options.verbose.getValue() >= 5) {
      Space.printVMMap();
    }
    Log.write(" ");
    printUsedPages();
    Log.write(" Collection time: ");
    totalTime.printLast();
    Log.writeln(" ms");
  }
}
/** Print out statistics at the start of a GC */
public void printPreStats() {
  if ((Options.verbose.getValue() == 1) || (Options.verbose.getValue() == 2)) {
    Log.write("[GC ");
    Log.write(Stats.gcCount());
    if (Options.verbose.getValue() == 1) {
      Log.write(" Start ");
      Plan.totalTime.printTotalSecs();
      Log.write(" s");
    } else {
      Log.write(" Start ");
      Plan.totalTime.printTotalMillis();
      Log.write(" ms");
    }
    Log.write(" ");
    Log.write(Conversions.pagesToKBytes(getPagesUsed()));
    Log.write("KB ");
    Log.flush();
  }
  if (Options.verbose.getValue() > 2) {
    Log.write("Collection ");
    Log.write(Stats.gcCount());
    Log.write(": ");
    printUsedPages();
    Log.write(" Before Collection: ");
    Space.printUsageMB();
    if (Options.verbose.getValue() >= 4) {
      Log.write(" ");
      Space.printUsagePages();
    }
    if (Options.verbose.getValue() >= 5) {
      Space.printVMMap();
    }
  }
}
/**
 * Print pre-collection statistics. In this class we prefix the output to indicate
 * whether the collection was full heap.
 */
public void printPreStats() {
  if ((Options.verbose.getValue() >= 1) && (gcFullHeap))
    Log.write("[Full heap]");
  super.printPreStats();
}
private boolean acquireRecyclableLines(int bytes, int align, int offset) {
  while (line < LINES_IN_BLOCK || acquireRecyclableBlock()) {
    line = space.getNextAvailableLine(markTable, line);
    if (line < LINES_IN_BLOCK) {
      int endLine = space.getNextUnavailableLine(markTable, line);
      cursor = recyclableBlock.plus(Extent.fromIntSignExtend(line << LOG_BYTES_IN_LINE));
      limit = recyclableBlock.plus(Extent.fromIntSignExtend(endLine << LOG_BYTES_IN_LINE));
      if (SANITY_CHECK_LINE_MARKS) {
        Address tmp = cursor;
        while (tmp.LT(limit)) {
          if (tmp.loadByte() != (byte) 0) {
            Log.write("cursor: "); Log.writeln(cursor);
            Log.write(" limit: "); Log.writeln(limit);
            Log.write("current: "); Log.write(tmp);
            Log.write(" value: "); Log.write(tmp.loadByte());
            Log.write(" line: "); Log.write(line);
            Log.write(" endline: "); Log.write(endLine);
            Log.write(" chunk: "); Log.write(Chunk.align(cursor));
            Log.write(" hw: "); Log.write(Chunk.getHighWater(Chunk.align(cursor)));
            Log.writeln(" values: ");
            Address tmp2 = cursor;
            while (tmp2.LT(limit)) {
              Log.write(tmp2.loadByte());
              Log.write(" ");
              tmp2 = tmp2.plus(1); // advance, otherwise this diagnostic dump never terminates
            }
            Log.writeln();
          }
          VM.assertions._assert(tmp.loadByte() == (byte) 0);
          tmp = tmp.plus(1);
        }
      }
      if (VM.VERIFY_ASSERTIONS && bytes <= BYTES_IN_LINE) {
        Address start = alignAllocationNoFill(cursor, align, offset);
        Address end = start.plus(bytes);
        VM.assertions._assert(end.LE(limit));
      }
      VM.memory.zero(cursor, limit.diff(cursor).toWord().toExtent());
      if (VM.VERIFY_ASSERTIONS && Options.verbose.getValue() >= 9) {
        Log.write("Z[");
        Log.write(cursor);
        Log.write("->");
        Log.write(limit);
        Log.writeln("]");
      }
      line = endLine;
      if (VM.VERIFY_ASSERTIONS && copy) VM.assertions._assert(!Block.isDefragSource(cursor));
      return true;
    }
  }
  return false;
}
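/*
 * Simplified sketch (hypothetical; the real work is done by the space's line mark tables)
 * of the line search that acquireRecyclableLines relies on: a run of free lines is the
 * half-open interval [nextAvailableLine, nextUnavailableLine) of consecutive unmarked lines.
 * The real Immix search also honours conservative line marking, which this sketch omits.
 */
final class LineScanSketch {
  static int nextAvailableLine(byte[] lineMarks, int start) {
    int i = start;
    while (i < lineMarks.length && lineMarks[i] != 0) i++; // skip marked (live) lines
    return i;                                              // first free line, or table length
  }

  static int nextUnavailableLine(byte[] lineMarks, int start) {
    int i = start;
    while (i < lineMarks.length && lineMarks[i] == 0) i++; // extend across free lines
    return i;                                              // first marked line after the run
  }
}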
/** Print out the status of the allocator (for debugging) */
public final void show() {
  Log.write("cursor = ");
  Log.write(cursor);
  Log.write(" limit = ");
  Log.writeln(limit);
}