/** * For each instruction in a set, rewrite every def to use a new temporary register. If a * rewritten def is subsequently used, then use the new temporary register instead. */ private void rewriteWithTemporaries(Instruction[] set, IR ir) { // Maintain a mapping holding the new name for each register HashMap<Register, Register> map = new HashMap<Register, Register>(); for (Instruction s : set) { // rewrite the uses to use the new names for (Enumeration<Operand> e = s.getUses(); e.hasMoreElements(); ) { Operand use = e.nextElement(); if (use != null && use.isRegister()) { Register r = use.asRegister().getRegister(); Register temp = map.get(r); if (temp != null) { use.asRegister().setRegister(temp); } } } if (VM.VerifyAssertions) VM._assert(s.getNumberOfDefs() == 1); Operand def = s.getDefs().nextElement(); RegisterOperand rDef = def.asRegister(); RegisterOperand temp = ir.regpool.makeTemp(rDef); map.put(rDef.getRegister(), temp.getRegister()); s.replaceOperand(def, temp); } }
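/*
 * A minimal stand-alone sketch of the renaming idea above, using hypothetical String
 * "registers" in place of the compiler's Register/Instruction types (these names are not part
 * of the source): each def gets a fresh temporary, and any later use of the old name is
 * redirected to that temporary.
 */
private static void renameDefsSketch(String[][] instructions) {
  // instructions[i][0] is the def of instruction i; instructions[i][1..] are its uses
  java.util.HashMap<String, String> map = new java.util.HashMap<String, String>();
  int nextTemp = 0;
  for (String[] instr : instructions) {
    for (int i = 1; i < instr.length; i++) {
      String renamed = map.get(instr[i]);
      if (renamed != null) instr[i] = renamed; // use the new name if this def was rewritten
    }
    String temp = "t" + (nextTemp++); // fresh temporary for this def
    map.put(instr[0], temp);
    instr[0] = temp;
  }
}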
@Override public void cleanUpAndInsertEpilogue() { Instruction inst = ir.firstInstructionInCodeOrder().nextInstructionInCodeOrder(); for (; inst != null; inst = inst.nextInstructionInCodeOrder()) { switch (inst.getOpcode()) { case IA32_MOV_opcode: // remove frivolous moves Operand result = MIR_Move.getResult(inst); Operand val = MIR_Move.getValue(inst); if (result.similar(val)) { inst = inst.remove(); } break; case IA32_FMOV_opcode: case IA32_MOVSS_opcode: case IA32_MOVSD_opcode: // remove frivolous moves result = MIR_Move.getResult(inst); val = MIR_Move.getValue(inst); if (result.similar(val)) { inst = inst.remove(); } break; case IA32_RET_opcode: if (frameIsRequired()) { insertEpilogue(inst); } default: break; } } // now that the frame size is fixed, fix up the spill location code rewriteStackLocations(); }
/** * Before instruction s, insert code to adjust ESP so that it lies at a particular offset from its * usual location. * * @param s the instruction before which ESP must have the desired offset * @param desiredOffset the desired offset */ private void moveESPBefore(Instruction s, int desiredOffset) { PhysicalRegisterSet phys = (PhysicalRegisterSet) ir.regpool.getPhysicalRegisterSet(); Register ESP = phys.getESP(); int delta = desiredOffset - ESPOffset; if (delta != 0) { if (canModifyEFLAGS(s)) { s.insertBefore( MIR_BinaryAcc.create( IA32_ADD, new RegisterOperand(ESP, PRIMITIVE_TYPE_FOR_WORD), VM.BuildFor32Addr ? IC(delta) : LC(delta))); } else { MemoryOperand M = MemoryOperand.BD( new RegisterOperand(ESP, PRIMITIVE_TYPE_FOR_WORD), Offset.fromIntSignExtend(delta), (byte) WORDSIZE, null, null); s.insertBefore( MIR_Lea.create(IA32_LEA, new RegisterOperand(ESP, PRIMITIVE_TYPE_FOR_WORD), M)); } ESPOffset = desiredOffset; } }
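// Informal shape of the code emitted above (assuming delta != 0): when EFLAGS may be
// clobbered at this point, a plain add is used,
//     ADD ESP, delta
// otherwise the adjustment is done with LEA, which leaves the condition flags intact:
//     LEA ESP, [ESP + delta]
// See canModifyEFLAGS below for how flag liveness is determined.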
/* generate yieldpoint without checking threadSwitch request */ private static void expandUnconditionalYieldpoint(Instruction s, IR ir, RVMMethod meth) { // split the basic block after the yieldpoint, create a new // block at the end of the IR to hold the yieldpoint, // remove the yieldpoint (to prepare to move it into the new block at the end) BasicBlock thisBlock = s.getBasicBlock(); BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(s, ir); BasicBlock yieldpoint = thisBlock.createSubBlock(s.getBytecodeIndex(), ir); thisBlock.insertOut(yieldpoint); yieldpoint.insertOut(nextBlock); ir.cfg.addLastInCodeOrder(yieldpoint); s.remove(); // change thread switch instruction into call to thread switch routine // NOTE: must make s the call instruction: it is the GC point! // must also inform the GCMap that s has been moved!!! Offset offset = meth.getOffset(); LocationOperand loc = new LocationOperand(offset); Operand guard = TG(); Operand target = MemoryOperand.D(Magic.getTocPointer().plus(offset), (byte) 4, loc, guard); MIR_Call.mutate0(s, CALL_SAVE_VOLATILE, null, null, target, MethodOperand.STATIC(meth)); yieldpoint.appendInstruction(s); ir.MIRInfo.gcIRMap.moveToEnd(s); yieldpoint.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget())); // make a jump to yield block thisBlock.appendInstruction(MIR_Branch.create(IA32_JMP, yieldpoint.makeJumpTarget())); }
/** * Create a new exception handler BBLE (and exception handler basic block) for the specified * bytecode index and exception type. * * @param loc bytecode index * @param position inline sequence * @param eType exception type * @param temps the register pool to allocate exceptionObject from * @param exprStackSize max size of expression stack * @param cfg ControlFlowGraph into which the block will eventually be inserted */ HandlerBlockLE( int loc, InlineSequence position, TypeOperand eType, GenericRegisterPool temps, int exprStackSize, ControlFlowGraph cfg) { super(loc); entryBlock = new ExceptionHandlerBasicBlock(SYNTH_CATCH_BCI, position, eType, cfg); block = new BasicBlock(loc, position, cfg); // NOTE: We intentionally use Throwable rather than eType to avoid // the complexity of having to regenerate the handler when a // new type of caught exception is added. Since we shouldn't care about // the performance of code in exception handling blocks, this // should be the right tradeoff. exceptionObject = temps.makeTemp(TypeReference.JavaLangThrowable); BC2IR.setGuardForRegOp(exceptionObject, new TrueGuardOperand()); // known not null high = loc; // Set up expression stack on entry to have the caught exception operand. stackState = new OperandStack(exprStackSize); stackState.push(exceptionObject); setStackKnown(); // entry block contains instructions to transfer the caught // exception object to exceptionObject. Instruction s = Nullary.create(GET_CAUGHT_EXCEPTION, exceptionObject.copyD2D()); entryBlock.appendInstruction(s); s.bcIndex = SYNTH_CATCH_BCI; entryBlock.insertOut(block); }
/** Evaluate the cost of a basic block, in number of real non-branch instructions. */ private int evaluateCost(BasicBlock bb) { int result = 0; for (Enumeration<Instruction> e = bb.forwardRealInstrEnumerator(); e.hasMoreElements(); ) { Instruction s = e.nextElement(); if (!s.isBranch()) result++; } return result; }
/** * Can EFLAGS safely be clobbered immediately before instruction s? Scans forward from s: * returns false if EFLAGS is read before being redefined, and true once EFLAGS is redefined * or the basic block ends. * * @param s the instruction before which EFLAGS would be modified * @return {@code true} iff EFLAGS may be modified at that point */ private boolean canModifyEFLAGS(Instruction s) { if (PhysicalDefUse.usesEFLAGS(s.operator())) { return false; } if (PhysicalDefUse.definesEFLAGS(s.operator())) { return true; } if (s.operator() == BBEND) return true; return canModifyEFLAGS(s.nextInstructionInCodeOrder()); }
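/*
 * Illustrative-only sketch of the same forward scan over a hypothetical flat list of
 * instructions, where readsFlags[j] / writesFlags[j] record whether instruction j reads or
 * writes EFLAGS (these arrays are not part of the source): the flags may be clobbered at
 * position i iff they are redefined, or the block ends, before any later read.
 */
private static boolean canClobberFlagsSketch(boolean[] readsFlags, boolean[] writesFlags, int i) {
  for (int j = i; j < readsFlags.length; j++) {
    if (readsFlags[j]) return false;  // a later instruction would observe the clobbered flags
    if (writesFlags[j]) return true;  // flags are redefined before any read
  }
  return true;                        // end of block: flags are dead
}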
/** * Update the value graph to account for a given instruction. * * @param s the instruction in question */ private void processInstruction(Instruction s) { // TODO: support all necessary types of instructions if (s.isDynamicLinkingPoint()) { processCall(s); } else if (Move.conforms(s)) { processMove(s); } else if (s.operator() == PI) { processPi(s); } else if (New.conforms(s)) { processNew(s); } else if (NewArray.conforms(s)) { processNewArray(s); } else if (Unary.conforms(s)) { processUnary(s); } else if (GuardedUnary.conforms(s)) { processGuardedUnary(s); } else if (NullCheck.conforms(s)) { processNullCheck(s); } else if (ZeroCheck.conforms(s)) { processZeroCheck(s); } else if (Binary.conforms(s)) { processBinary(s); } else if (GuardedBinary.conforms(s)) { processGuardedBinary(s); } else if (InlineGuard.conforms(s)) { processInlineGuard(s); } else if (IfCmp.conforms(s)) { processIfCmp(s); } else if (Call.conforms(s)) { processCall(s); } else if (MonitorOp.conforms(s)) { processCall(s); } else if (Prepare.conforms(s)) { processCall(s); } else if (Attempt.conforms(s)) { processCall(s); } else if (CacheOp.conforms(s)) { processCall(s); } else if (ALoad.conforms(s)) { processALoad(s); } else if (PutField.conforms(s)) { processPutField(s); } else if (PutStatic.conforms(s)) { processPutStatic(s); } else if (AStore.conforms(s)) { processAStore(s); } else if (Phi.conforms(s)) { processPhi(s); } else if (s.operator() == IR_PROLOGUE) { processPrologue(s); } }
/** * Flip a conditional branch and remove the trailing goto. See comment 3) of * processConditionalBranch * * <p>Precondition isFlipCandidate(cb) * * @param cb the conditional branch instruction */ private void flipConditionalBranch(Instruction cb) { // get the trailing GOTO instruction Instruction g = cb.nextInstructionInCodeOrder(); BranchOperand gTarget = (BranchOperand) (Goto.getTarget(g).copy()); // now flip the test and set the new target IfCmp.setCond(cb, IfCmp.getCond(cb).flipCode()); IfCmp.setTarget(cb, gTarget); // Update the branch probability. It is now the opposite cb.flipBranchProbability(); // finally, remove the trailing GOTO instruction g.remove(); }
/** * Do any of the instructions in a basic block define a floating-point register? * * @param bb basic block to search * @param invert invert the sense of the search */ private static boolean hasFloatingPointDef(BasicBlock bb, boolean invert) { if (bb == null) return false; for (Enumeration<Instruction> e = bb.forwardRealInstrEnumerator(); e.hasMoreElements(); ) { Instruction s = e.nextElement(); for (Enumeration<Operand> d = s.getDefs(); d.hasMoreElements(); ) { Operand def = d.nextElement(); if (def.isRegister()) { if (def.asRegister().getRegister().isFloatingPoint() != invert) return true; } } } return false; }
/** Do any of the instructions in a basic block define a long register? */ private boolean hasLongDef(BasicBlock bb) { if (bb == null) return false; for (Enumeration<Instruction> e = bb.forwardRealInstrEnumerator(); e.hasMoreElements(); ) { Instruction s = e.nextElement(); for (Enumeration<Operand> d = s.getDefs(); d.hasMoreElements(); ) { Operand def = d.nextElement(); if (def.isRegister()) { if (def.asRegister().getRegister().isLong()) return true; } } } return false; }
/** * Expand an FCLEAR pseudo-instruction using FFREEs. * * @param s the instruction to expand * @param ir the containing IR */ private static void expandFClear(Instruction s, IR ir) { int nSave = MIR_UnaryNoRes.getVal(s).asIntConstant().value; int fpStackHeight = ir.MIRInfo.fpStackHeight; PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asIA32(); for (int i = nSave; i < fpStackHeight; i++) { Register f = phys.getFPR(i); s.insertBefore(MIR_Nullary.create(IA32_FFREE, D(f))); } // Remove the FCLEAR. s.remove(); }
/** * Update the value graph to account for a given InlineGuard instruction. * * <p><b>PRECONDITION:</b> <code> InlineGuard.conforms(s); </code> * * @param s the instruction in question */ private void processInlineGuard(Instruction s) { ValueGraphVertex v = new ValueGraphVertex(s); graph.addGraphNode(v); nameMap.put(s, v); if (s.operator() == IG_PATCH_POINT) { // the 'goal' is irrelevant for patch_point guards. v.setLabel(s.operator(), 1); link(v, findOrCreateVertex(bypassMoves(InlineGuard.getValue(s))), 0); } else { v.setLabel(s.operator(), 2); link(v, findOrCreateVertex(bypassMoves(InlineGuard.getValue(s))), 0); link(v, findOrCreateVertex(InlineGuard.getGoal(s)), 1); } }
/** * Remove cb from source, updating PHI nodes to maintain SSA form. * * @param source basic block containing cb * @param cb conditional branch to remove * @param ir containing IR * @param di branch that dominates cb */ private void removeCondBranch(BasicBlock source, Instruction cb, IR ir, Instruction di) { if (DEBUG) VM.sysWrite("Eliminating definitely not-taken branch " + cb + "\n"); if (IfCmp.conforms(cb) && IfCmp.hasGuardResult(cb)) { cb.insertBefore( Move.create(GUARD_MOVE, IfCmp.getGuardResult(cb), IfCmp.getGuardResult(di).copy())); } BasicBlock deadBB = cb.getBranchTarget(); cb.remove(); source.recomputeNormalOut(ir); if (!source.pointsOut(deadBB)) { // there is no longer an edge from source to target; // update any PHIs in target to reflect this. SSA.purgeBlockFromPHIs(source, deadBB); } }
@Override public boolean mayEscapeThread(Instruction instruction) { switch (instruction.getOpcode()) { case DCBST_opcode: case DCBT_opcode: case DCBTST_opcode: case DCBZ_opcode: case DCBZL_opcode: case ICBI_opcode: return false; case LONG_OR_opcode: case LONG_AND_opcode: case LONG_XOR_opcode: case LONG_SUB_opcode: case LONG_SHL_opcode: case LONG_ADD_opcode: case LONG_SHR_opcode: case LONG_USHR_opcode: case LONG_NEG_opcode: case LONG_MOVE_opcode: case LONG_2ADDR_opcode: return true; default: throw new OptimizingCompilerException("SimpleEscape: Unexpected " + instruction); } }
/** * Generate a boolean operation opcode * * <pre> * 1) IF br != 0 THEN x=1 ELSE x=0 replaced by INT_MOVE x=br * IF br == 0 THEN x=0 ELSE x=1 * 2) IF br == 0 THEN x=1 ELSE x=0 replaced by BOOLEAN_NOT x=br * IF br != 0 THEN x=0 ELSE x=1 * 3) IF v1 ~ v2 THEN x=1 ELSE x=0 replaced by BOOLEAN_CMP x=v1,v2,~ * </pre> * * @param cb conditional branch instruction * @param res the operand for result * @param val1 value being compared * @param val2 value being compared with * @param cond comparison condition */ private void booleanCompareHelper( Instruction cb, RegisterOperand res, Operand val1, Operand val2, ConditionOperand cond) { if ((val1 instanceof RegisterOperand) && ((RegisterOperand) val1).getType().isBooleanType() && (val2 instanceof IntConstantOperand)) { int value = ((IntConstantOperand) val2).value; if (VM.VerifyAssertions && (value != 0) && (value != 1)) { throw new OptimizingCompilerException("Invalid boolean value"); } int c = cond.evaluate(value, 0); if (c == ConditionOperand.TRUE) { Unary.mutate(cb, BOOLEAN_NOT, res, val1); return; } else if (c == ConditionOperand.FALSE) { Move.mutate(cb, INT_MOVE, res, val1); return; } } BooleanCmp.mutate( cb, (cb.operator() == REF_IFCMP) ? BOOLEAN_CMP_ADDR : BOOLEAN_CMP_INT, res, val1, val2, cond, new BranchProfileOperand()); }
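/*
 * Stand-alone sketch (not part of the source) of the constant-folding step above, under the
 * assumption that b is a 0/1 boolean and the condition is == or !=: (b cond k) equals b itself
 * when (k cond 0) is false, and equals !b when (k cond 0) is true, which is exactly the
 * INT_MOVE / BOOLEAN_NOT choice driven by cond.evaluate(value, 0).
 */
private static int booleanCompareSketch(int b, boolean isEqualCond, int k) {
  boolean kCondZero = isEqualCond ? (k == 0) : (k != 0); // evaluate (k cond 0)
  return kCondZero ? (1 - b) : b; // 1 - b is the BOOLEAN_NOT of a 0/1 value
}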
@Override public void replaceOperandWithSpillLocation(Instruction s, RegisterOperand symb) { // Get the spill location previously assigned to the symbolic // register. int location = regAllocState.getSpill(symb.getRegister()); // Create a memory operand M representing the spill location. int size; if (VM.BuildFor32Addr) { if (SSE2_FULL) { size = symb.getType().getMemoryBytes(); if (size < WORDSIZE) size = WORDSIZE; } else { int type = PhysicalRegisterSet.getPhysicalRegisterType(symb.getRegister()); size = getSpillSize(type); } } else { if (VM.BuildFor64Addr && symb.getType().getMemoryBytes() <= BYTES_IN_INT) { // Int-like types and floats need 32-bit locations size = BYTES_IN_INT; } else { size = WORDSIZE; } } StackLocationOperand M = new StackLocationOperand(true, -location, (byte) size); if (VERBOSE_DEBUG) { System.out.println("REPLACE_OP_WITH_SPILL_LOC: " + "Instruction before replacement: " + s); } // replace the register operand with the memory operand s.replaceOperand(symb, M); if (VERBOSE_DEBUG) { System.out.println("REPLACE_OP_WITH_SPILL_LOC: " + "Instruction after replacement: " + s); } }
/** Transform cb into a GOTO, updating PHI nodes to maintain SSA form. */ private void takeCondBranch(BasicBlock source, Instruction cb, IR ir) { if (DEBUG) VM.sysWrite("Eliminating definitely taken branch " + cb + "\n"); BasicBlock deadBB = source.nextBasicBlockInCodeOrder(); Instruction next = cb.nextInstructionInCodeOrder(); if (Goto.conforms(next)) { deadBB = next.getBranchTarget(); next.remove(); } Goto.mutate(cb, GOTO, cb.getBranchTarget().makeJumpTarget()); source.recomputeNormalOut(ir); if (!source.pointsOut(deadBB)) { // there is no longer an edge from source to target; // update any PHIs in target to reflect this. SSA.purgeBlockFromPHIs(source, deadBB); } }
/** * Insert code into the epilogue to restore the floating point state. * * @param inst the return instruction after the epilogue. */ private void restoreFloatingPointState(Instruction inst) { if (SSE2_FULL) { GenericPhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet(); for (int i = 0; i < 8; i++) { inst.insertBefore( MIR_Move.create( IA32_MOVQ, new RegisterOperand(phys.getFPR(i), TypeReference.Double), new StackLocationOperand( true, -fsaveLocation + (i * BYTES_IN_DOUBLE), BYTES_IN_DOUBLE))); } } else { Operand M = new StackLocationOperand(true, -fsaveLocation, 4); inst.insertBefore(MIR_FSave.create(IA32_FRSTOR, M)); } }
/** * Insert an explicit stack overflow check in the prologue <em>after</em> buying the stack frame. * * <p>SIDE EFFECT: mutates the plg into a trap instruction. We need to mutate so that the trap * instruction is in the GC map data structures. * * @param plg the prologue instruction */ private void insertNormalStackOverflowCheck(Instruction plg) { if (!ir.method.isInterruptible()) { plg.remove(); return; } if (ir.compiledMethod.isSaveVolatile()) { return; } PhysicalRegisterSet phys = (PhysicalRegisterSet) ir.regpool.getPhysicalRegisterSet(); Register ESP = phys.getESP(); MemoryOperand M = MemoryOperand.BD( ir.regpool.makeTROp(), Entrypoints.stackLimitField.getOffset(), (byte) WORDSIZE, null, null); // Trap if ESP <= active Thread Stack Limit MIR_TrapIf.mutate( plg, IA32_TRAPIF, null, new RegisterOperand(ESP, PRIMITIVE_TYPE_FOR_WORD), M, IA32ConditionOperand.LE(), TrapCodeOperand.StackOverflow()); }
/** * Update the value graph to account for an IR_PROLOGUE instruction * * <p><b>PRECONDITION:</b> <code> Prologue.conforms(s); </code> * * @param s the instruction in question */ private void processPrologue(Instruction s) { int numArgs = 0; for (Enumeration<Operand> e = s.getDefs(); e.hasMoreElements(); numArgs++) { Register formal = ((RegisterOperand) e.nextElement()).getRegister(); ValueGraphVertex v = findOrCreateVertex(formal); v.setLabel(new ValueGraphParamLabel(numArgs), 0); } }
/** * Update the value graph to account for a given IfCmp instruction. * * <p><b>PRECONDITION:</b> <code> IfCmp.conforms(s); </code> * * @param s the instruction in question */ private void processIfCmp(Instruction s) { ValueGraphVertex v = new ValueGraphVertex(s); graph.addGraphNode(v); nameMap.put(s, v); v.setLabel(s.operator(), 3); link(v, findOrCreateVertex(bypassMoves(IfCmp.getVal1(s))), 0); link(v, findOrCreateVertex(bypassMoves(IfCmp.getVal2(s))), 1); link(v, findOrCreateVertex(IfCmp.getCond(s)), 2); }
/** * Perform optimizations for an inline guard. * * <p>Precondition: InlineGuard.conforms(cb) * * @param ir the governing IR * @param cb the instruction to optimize * @param bb the basic block holding cb * @return {@code true} iff a transformation was made */ private boolean processInlineGuard(IR ir, Instruction cb, BasicBlock bb) { BasicBlock targetBlock = cb.getBranchTarget(); Instruction targetLabel = targetBlock.firstInstruction(); // get the first real instruction at the branch target // NOTE: this instruction is not necessarily in targetBlock, // e.g., if targetBlock has no real instructions Instruction targetInst = firstRealInstructionFollowing(targetLabel); if (targetInst == null || targetInst == cb) { return false; } boolean endsBlock = cb.nextInstructionInCodeOrder().operator() == BBEND; if (endsBlock) { Instruction nextLabel = firstLabelFollowing(cb); if (targetLabel == nextLabel) { // found a conditional branch to the next instruction. just remove it. cb.remove(); return true; } Instruction nextI = firstRealInstructionFollowing(nextLabel); if (nextI != null && Goto.conforms(nextI)) { // replicate Goto cb.insertAfter(nextI.copyWithoutLinks()); bb.recomputeNormalOut(ir); // fix the CFG return true; } } // do we fall through to a block that has only a goto? BasicBlock fallThrough = bb.getFallThroughBlock(); if (fallThrough != null) { Instruction fallThroughInstruction = fallThrough.firstRealInstruction(); if ((fallThroughInstruction != null) && Goto.conforms(fallThroughInstruction)) { // copy goto to bb bb.appendInstruction(fallThroughInstruction.copyWithoutLinks()); bb.recomputeNormalOut(ir); } } if (Goto.conforms(targetInst)) { // conditional branch to unconditional branch. // change conditional branch target to latter's target InlineGuard.setTarget(cb, (BranchOperand) Goto.getTarget(targetInst).copy()); bb.recomputeNormalOut(ir); // fix the CFG return true; } if (targetBlock.isEmpty()) { // branch to an empty block. Change target to the next block. BasicBlock nextBlock = targetBlock.getFallThroughBlock(); InlineGuard.setTarget(cb, nextBlock.makeJumpTarget()); bb.recomputeNormalOut(ir); // fix the CFG return true; } return false; }
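/*
 * The goto-replication and retargeting cases above each take a single "hop" and rely on the
 * pass being reapplied until nothing changes. This hypothetical sketch (none of these names
 * are in the source) shows the fixed point they converge to: follow blocks that are empty or
 * contain only a goto until a block with real work is reached, and branch there directly.
 * next[b] is an assumed array mapping such a trivial block to the block it forwards to, or -1
 * when block b has real contents; the forwarding chain is assumed acyclic.
 */
private static int finalTargetSketch(int[] next, int target) {
  while (next[target] != -1) {
    target = next[target]; // hop over a block that is empty or is just an unconditional goto
  }
  return target;
}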
/** * Insert an explicit stack overflow check in the prologue <em>before</em> buying the stack frame. * SIDE EFFECT: mutates the plg into a trap instruction. We need to mutate so that the trap * instruction is in the GC map data structures. * * @param plg the prologue instruction */ private void insertBigFrameStackOverflowCheck(Instruction plg) { if (!ir.method.isInterruptible()) { plg.remove(); return; } if (ir.compiledMethod.isSaveVolatile()) { return; } PhysicalRegisterSet phys = (PhysicalRegisterSet) ir.regpool.getPhysicalRegisterSet(); Register ESP = phys.getESP(); Register ECX = phys.getECX(); // ECX := active Thread Stack Limit MemoryOperand M = MemoryOperand.BD( ir.regpool.makeTROp(), Entrypoints.stackLimitField.getOffset(), (byte) WORDSIZE, null, null); plg.insertBefore( MIR_Move.create(IA32_MOV, new RegisterOperand((ECX), PRIMITIVE_TYPE_FOR_WORD), M)); // ECX += frame Size int frameSize = getFrameFixedSize(); plg.insertBefore( MIR_BinaryAcc.create( IA32_ADD, new RegisterOperand(ECX, PRIMITIVE_TYPE_FOR_WORD), VM.BuildFor32Addr ? IC(frameSize) : LC(frameSize))); // Trap if ESP <= ECX MIR_TrapIf.mutate( plg, IA32_TRAPIF, null, new RegisterOperand(ESP, PRIMITIVE_TYPE_FOR_WORD), new RegisterOperand(ECX, PRIMITIVE_TYPE_FOR_WORD), IA32ConditionOperand.LE(), TrapCodeOperand.StackOverflow()); }
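// Informal shape of the check emitted above (operands simplified):
//     MOV    ECX, [TR + stackLimitOffset]   ; ECX := active thread's stack limit
//     ADD    ECX, frameSize                 ; adjust the limit by the size of this frame
//     TRAPIF ESP <= ECX                     ; trap with StackOverflow before the frame is bought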
/** * Update the value graph to account for a given ZeroCheck instruction. * * <p><b>PRECONDITION:</b> <code> ZeroCheck.conforms(s); </code> * * @param s the instruction in question */ private void processZeroCheck(Instruction s) { // label the vertex corresponding to the result with the operator RegisterOperand result = ZeroCheck.getGuardResult(s); ValueGraphVertex v = findOrCreateVertex(result.getRegister()); v.setLabel(s.operator(), 1); // link node v to the operand it uses Operand val = ZeroCheck.getValue(s); // bypass Move instructions val = bypassMoves(val); link(v, findOrCreateVertex(val), 0); }
/** * Insert the epilogue before a particular return instruction. * * @param ret the return instruction. */ private void insertEpilogue(Instruction ret) { // 1. Restore any saved registers if (ir.compiledMethod.isSaveVolatile()) { restoreVolatileRegisters(ret); restoreFloatingPointState(ret); } restoreNonVolatiles(ret); // 2. Restore caller's stackpointer and framepointer int frameSize = getFrameFixedSize(); ret.insertBefore(MIR_UnaryNoRes.create(REQUIRE_ESP, IC(frameSize))); MemoryOperand fpHome = MemoryOperand.BD( ir.regpool.makeTROp(), ArchEntrypoints.framePointerField.getOffset(), (byte) WORDSIZE, null, null); ret.insertBefore(MIR_Nullary.create(IA32_POP, fpHome)); }
private static void expandYieldpoint( Instruction s, IR ir, RVMMethod meth, IA32ConditionOperand ypCond) { // split the basic block after the yieldpoint, create a new // block at the end of the IR to hold the yieldpoint, // remove the yieldpoint (to prepare to move it into the new block at the end) BasicBlock thisBlock = s.getBasicBlock(); BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(s, ir); BasicBlock yieldpoint = thisBlock.createSubBlock(s.getBytecodeIndex(), ir, 0); thisBlock.insertOut(yieldpoint); yieldpoint.insertOut(nextBlock); ir.cfg.addLastInCodeOrder(yieldpoint); s.remove(); // change thread switch instruction into call to thread switch routine // NOTE: must make s the call instruction: it is the GC point! // must also inform the GCMap that s has been moved!!! Offset offset = meth.getOffset(); LocationOperand loc = new LocationOperand(offset); Operand guard = TG(); Operand target; if (JTOC_REGISTER == null) { target = MemoryOperand.D(Magic.getTocPointer().plus(offset), (byte) 4, loc, guard); } else { target = MemoryOperand.BD(ir.regpool.makeTocOp().asRegister(), offset, (byte) 8, loc, guard); } MIR_Call.mutate0(s, CALL_SAVE_VOLATILE, null, null, target, MethodOperand.STATIC(meth)); yieldpoint.appendInstruction(s); ir.MIRInfo.gcIRMap.moveToEnd(s); yieldpoint.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget())); // Check to see if threadSwitch requested Offset tsr = Entrypoints.takeYieldpointField.getOffset(); MemoryOperand M = MemoryOperand.BD(ir.regpool.makeTROp(), tsr, (byte) 4, null, null); thisBlock.appendInstruction(MIR_Compare.create(IA32_CMP, M, IC(0))); thisBlock.appendInstruction( MIR_CondBranch.create( IA32_JCC, ypCond, yieldpoint.makeJumpTarget(), BranchProfileOperand.never())); }
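// Informal sketch of the control flow produced above:
//     thisBlock:       ... CMP [TR + takeYieldpoint offset], 0
//                          JCC(ypCond) -> yieldpointBlock      (predicted never taken)
//     nextBlock:       code that followed the original yieldpoint (fall-through)
//     yieldpointBlock: CALL_SAVE_VOLATILE yieldpoint method     (the moved GC point s)
//                      JMP -> nextBlock                         (block lives at end of code order)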
/** * Insert code before a return instruction to restore the volatile registers. * * @param inst the return instruction */ private void restoreVolatileRegisters(Instruction inst) { GenericPhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet(); // Restore every volatile GPR int i = 0; for (Enumeration<Register> e = phys.enumerateVolatileGPRs(); e.hasMoreElements(); i++) { Register r = e.nextElement(); int location = saveVolatileGPRLocation[i]; Operand M = new StackLocationOperand(true, -location, WORDSIZE); inst.insertBefore( MIR_Move.create(IA32_MOV, new RegisterOperand(r, PRIMITIVE_TYPE_FOR_WORD), M)); } }
/** * Transform to eliminate redundant branches based on GVNs (global value numbers) and dominator * information. * * @param ir The IR on which to apply the phase */ public void perform(IR ir) { // (1) Remove redundant conditional branches and locally fix the PHIs GlobalValueNumberState gvns = ir.HIRInfo.valueNumbers; DominatorTree dt = ir.HIRInfo.dominatorTree; for (BasicBlockEnumeration bbs = ir.getBasicBlocks(); bbs.hasMoreElements(); ) { BasicBlock candBB = bbs.next(); Instruction candTest = candBB.firstBranchInstruction(); if (candTest == null) continue; if (!(IfCmp.conforms(candTest) || InlineGuard.conforms(candTest))) continue; GVCongruenceClass cc = gvns.congruenceClass(candTest); if (cc.size() > 1) { for (ValueGraphVertex vertex : cc) { Instruction poss = (Instruction) vertex.getName(); if (poss != candTest) { BasicBlock notTaken = getNotTakenBlock(poss); BasicBlock taken = poss.getBranchTarget(); if (taken == notTaken) continue; // both go to same block, so we don't know anything! if (notTaken.hasOneIn() && dt.dominates(notTaken, candBB)) { if (DEBUG) VM.sysWrite(candTest + " is dominated by not-taken branch of " + poss + "\n"); removeCondBranch(candBB, candTest, ir, poss); cc.removeVertex(gvns.valueGraph.getVertex(candTest)); break; } if (taken.hasOneIn() && dt.dominates(taken, candBB)) { if (DEBUG) VM.sysWrite(candTest + " is dominated by taken branch of " + poss + "\n"); takeCondBranch(candBB, candTest, ir); cc.removeVertex(gvns.valueGraph.getVertex(candTest)); break; } } } } } // (2) perform a depth-first search of the control flow graph, // and remove any nodes we have made unreachable removeUnreachableCode(ir); }
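// Informal example of the dominance argument used above, with hypothetical blocks:
//
//     A: if (p) goto T      // poss, congruent to candTest under value numbering
//        |         \
//        F          T       // F = not-taken successor; A is its only predecessor
//        |
//     B: if (p) goto T2     // candTest
//
// If F dominates B, every path to B went through the not-taken side of A, so p is known false
// at B and candTest can be removed (removeCondBranch). Symmetrically, if T dominates B,
// candTest is definitely taken and is turned into a goto (takeCondBranch).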
/** * For each real non-branch instruction s in bb, * * <ul> * <li>Copy s to s', and store s' in the returned array * <li>Insert the function s->s' in the map * </ul> */ private Instruction[] copyAndMapInstructions( BasicBlock bb, HashMap<Instruction, Instruction> map) { if (bb == null) return new Instruction[0]; int count = 0; // first count the number of instructions for (Enumeration<Instruction> e = bb.forwardRealInstrEnumerator(); e.hasMoreElements(); ) { Instruction s = e.nextElement(); if (s.isBranch()) continue; count++; } // now copy. Instruction[] result = new Instruction[count]; int i = 0; for (Enumeration<Instruction> e = bb.forwardRealInstrEnumerator(); e.hasMoreElements(); ) { Instruction s = e.nextElement(); if (s.isBranch()) continue; Instruction sprime = s.copyWithoutLinks(); result[i++] = sprime; map.put(s, sprime); } return result; }