  /**
   * Generates a yieldpoint that calls the thread-switch routine
   * unconditionally, i.e. without checking the threadSwitch request.
   *
   * @param s the yieldpoint instruction
   * @param ir the governing IR
   * @param meth the method to call to handle the yieldpoint
   */
  private static void expandUnconditionalYieldpoint(Instruction s, IR ir, RVMMethod meth) {
    // split the basic block after the yieldpoint, create a new
    // block at the end of the IR to hold the yieldpoint,
    // remove the yieldpoint (to prepare to put it in the new block at the end)
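    // The resulting code order (a sketch, derived from the code below):
    //   thisBlock:   ...              jmp yieldpoint
    //   nextBlock:   rest of the original block
    //   ...
    //   yieldpoint:  call meth        jmp nextBlock   (appended at the end)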
    BasicBlock thisBlock = s.getBasicBlock();
    BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(s, ir);
    BasicBlock yieldpoint = thisBlock.createSubBlock(s.getBytecodeIndex(), ir);
    thisBlock.insertOut(yieldpoint);
    yieldpoint.insertOut(nextBlock);
    ir.cfg.addLastInCodeOrder(yieldpoint);
    s.remove();

    // change thread switch instruction into call to thread switch routine
    // NOTE: must make s the call instruction: it is the GC point!
    //       must also inform the GCMap that s has been moved!!!
    Offset offset = meth.getOffset();
    LocationOperand loc = new LocationOperand(offset);
    Operand guard = TG();
    Operand target;
    if (JTOC_REGISTER == null) {
      target = MemoryOperand.D(Magic.getTocPointer().plus(offset), (byte) 4, loc, guard);
    } else {
      target = MemoryOperand.BD(ir.regpool.makeTocOp().asRegister(), offset, (byte) 8, loc, guard);
    }
    MIR_Call.mutate0(s, CALL_SAVE_VOLATILE, null, null, target, MethodOperand.STATIC(meth));
    yieldpoint.appendInstruction(s);
    ir.MIRInfo.gcIRMap.moveToEnd(s);

    yieldpoint.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget()));

    // make a jump to yield block
    thisBlock.appendInstruction(MIR_Branch.create(IA32_JMP, yieldpoint.makeJumpTarget()));
  }
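
  // Regression test from the register allocator test suite: when a new live
  // range directly follows the last range of a compound interval (the def's
  // DFN equals the interval's current end), addRange extends the last
  // BasicInterval in place and returns null instead of a fresh interval.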
 @Test
 public void addRangeChangesEndOfLastIntervalWhenRangesDirectlyFollowEachOther_DefUse() {
   Register reg = new Register(3);
   CompoundInterval ci = new CompoundInterval(DEFAULT_BEGIN, DEFAULT_END, reg);
   assertThat(ci.last().getEnd(), is(DEFAULT_END));
   RegisterAllocatorState regAllocState = new RegisterAllocatorState(1);
   Instruction def = Empty.create(WRITE_FLOOR);
   Instruction lastUse = Empty.create(WRITE_FLOOR);
   LiveIntervalElement live = new LiveIntervalElement(reg, def, lastUse);
   ControlFlowGraph emptyCfg = new ControlFlowGraph(0);
   BasicBlock bb = new BasicBlock(1, null, emptyCfg);
   bb.appendInstruction(def);
   bb.appendInstruction(lastUse);
    regAllocState.initializeDepthFirstNumbering(10);
    // number def and last use so that the new range [DEFAULT_END,
    // DEFAULT_END + 1] starts exactly where the interval currently ends
    regAllocState.setDFN(def, DEFAULT_END);
    regAllocState.setDFN(lastUse, DEFAULT_END + 1);
   BasicInterval bi = ci.addRange(regAllocState, live, bb);
   assertNull(bi);
   assertThat(ci.last().getEnd(), is(DEFAULT_END + 1));
 }
  /**
   * Perform optimizations for an inline guard.
   *
   * <p>Precondition: InlineGuard.conforms(cb)
   *
   * @param ir the governing IR
   * @param cb the instruction to optimize
   * @param bb the basic block holding cb
   * @return {@code true} iff a transformation was made
   */
  private boolean processInlineGuard(IR ir, Instruction cb, BasicBlock bb) {
    BasicBlock targetBlock = cb.getBranchTarget();
    Instruction targetLabel = targetBlock.firstInstruction();
    // get the first real instruction at the branch target
    // NOTE: this instruction is not necessarily in targetBlock
    // (e.g. when targetBlock has no real instructions)
    Instruction targetInst = firstRealInstructionFollowing(targetLabel);
    if (targetInst == null || targetInst == cb) {
      return false;
    }
    boolean endsBlock = cb.nextInstructionInCodeOrder().operator() == BBEND;
    if (endsBlock) {
      Instruction nextLabel = firstLabelFollowing(cb);
      if (targetLabel == nextLabel) {
        // found a conditional branch to the next instruction.  just remove it.
        cb.remove();
        return true;
      }
      Instruction nextI = firstRealInstructionFollowing(nextLabel);
      if (nextI != null && Goto.conforms(nextI)) {
        // replicate Goto
        cb.insertAfter(nextI.copyWithoutLinks());
        bb.recomputeNormalOut(ir); // fix the CFG
        return true;
      }
    }
    // do we fall through to a block that has only a goto?
    BasicBlock fallThrough = bb.getFallThroughBlock();
    if (fallThrough != null) {
      Instruction fallThroughInstruction = fallThrough.firstRealInstruction();
      if ((fallThroughInstruction != null) && Goto.conforms(fallThroughInstruction)) {
        // copy goto to bb
        bb.appendInstruction(fallThroughInstruction.copyWithoutLinks());
        bb.recomputeNormalOut(ir);
      }
    }

    if (Goto.conforms(targetInst)) {
      // conditional branch to unconditional branch.
      // change conditional branch target to latter's target
      InlineGuard.setTarget(cb, (BranchOperand) Goto.getTarget(targetInst).copy());
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    if (targetBlock.isEmpty()) {
      // branch to an empty block.  Change target to the next block.
      BasicBlock nextBlock = targetBlock.getFallThroughBlock();
      InlineGuard.setTarget(cb, nextBlock.makeJumpTarget());
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    return false;
  }
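  /**
   * Expands a yieldpoint into a test of the thread's takeYieldpoint field
   * followed by a conditional call (from an out-of-line block at the end of
   * the code order) to the given thread-switch routine.
   *
   * @param s the yieldpoint instruction
   * @param ir the governing IR
   * @param meth the method to call to handle the yieldpoint
   * @param ypCond the condition on takeYieldpoint under which the yieldpoint is taken
   */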
  private static void expandYieldpoint(
      Instruction s, IR ir, RVMMethod meth, IA32ConditionOperand ypCond) {
    // split the basic block after the yieldpoint, create a new
    // block at the end of the IR to hold the yieldpoint,
    // remove the yieldpoint (to prepare to put it in the new block at the end)
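    // The resulting code order (a sketch, derived from the code below):
    //   thisBlock:   ...   cmp takeYieldpoint, 0   jcc yieldpoint
    //   nextBlock:   rest of the original block (reached on fallthrough)
    //   ...
    //   yieldpoint:  call meth   jmp nextBlock   (appended at the end)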
    BasicBlock thisBlock = s.getBasicBlock();
    BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(s, ir);
    BasicBlock yieldpoint = thisBlock.createSubBlock(s.getBytecodeIndex(), ir, 0);
    thisBlock.insertOut(yieldpoint);
    yieldpoint.insertOut(nextBlock);
    ir.cfg.addLastInCodeOrder(yieldpoint);
    s.remove();

    // change thread switch instruction into call to thread switch routine
    // NOTE: must make s the call instruction: it is the GC point!
    //       must also inform the GCMap that s has been moved!!!
    Offset offset = meth.getOffset();
    LocationOperand loc = new LocationOperand(offset);
    Operand guard = TG();
    Operand target;
    if (JTOC_REGISTER == null) {
      target = MemoryOperand.D(Magic.getTocPointer().plus(offset), (byte) 4, loc, guard);
    } else {
      target = MemoryOperand.BD(ir.regpool.makeTocOp().asRegister(), offset, (byte) 8, loc, guard);
    }

    MIR_Call.mutate0(s, CALL_SAVE_VOLATILE, null, null, target, MethodOperand.STATIC(meth));
    yieldpoint.appendInstruction(s);
    ir.MIRInfo.gcIRMap.moveToEnd(s);

    yieldpoint.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget()));

    // Check to see if threadSwitch requested
    Offset tsr = Entrypoints.takeYieldpointField.getOffset();
    MemoryOperand M = MemoryOperand.BD(ir.regpool.makeTROp(), tsr, (byte) 4, null, null);
    thisBlock.appendInstruction(MIR_Compare.create(IA32_CMP, M, IC(0)));
    thisBlock.appendInstruction(
        MIR_CondBranch.create(
            IA32_JCC, ypCond, yieldpoint.makeJumpTarget(), BranchProfileOperand.never()));
  }
  /**
   * Perform optimizations for a conditional branch.
   *
   * <pre>
   * 1)   IF .. GOTO A          replaced by  IF .. GOTO B
   *      ...
   *   A: GOTO B
   * 2)   conditional branch to next instruction eliminated
   * 3)   IF (condition) GOTO A  replaced by  IF (!condition) GOTO B
   *      GOTO B                           A: ...
   *   A: ...
   * 4) special case to generate Boolean compare opcode
   * 5) special case to generate conditional move sequence
   * 6)   IF .. GOTO A       replaced by  IF .. GOTO B
   *   A: LABEL
   *      BBEND
   *   B:
   * 7)  fallthrough to a goto: replicate goto to enable other optimizations.
   * </pre>
   *
   * <p>Precondition: IfCmp.conforms(cb)
   *
   * @param ir the governing IR
   * @param cb the instruction to optimize
   * @param bb the basic block holding cb
   * @return {@code true} iff a transformation was made
   */
  private boolean processConditionalBranch(IR ir, Instruction cb, BasicBlock bb) {
    BasicBlock targetBlock = cb.getBranchTarget();

    // don't optimize jumps to a code motion landing pad
    if (targetBlock.getLandingPad()) return false;

    Instruction targetLabel = targetBlock.firstInstruction();
    // get the first real instruction at the branch target
    // NOTE: this instruction is not necessarily in targetBlock
    // (e.g. when targetBlock has no real instructions)
    Instruction targetInst = firstRealInstructionFollowing(targetLabel);
    if (targetInst == null || targetInst == cb) {
      return false;
    }
    boolean endsBlock = cb.nextInstructionInCodeOrder().operator() == BBEND;
    if (endsBlock) {
      Instruction nextLabel = firstLabelFollowing(cb);

      if (targetLabel == nextLabel) {
        // found a conditional branch to the next instruction.  just remove it.
        cb.remove();
        return true;
      }
      Instruction nextI = firstRealInstructionFollowing(nextLabel);
      if (nextI != null && Goto.conforms(nextI)) {
        // Check that the target is not the fall through (the goto itself).
        // If we add a goto to the next block, it will be removed by
        // processGoto and we will loop indefinitely.
        // This can be tripped by (strange) code such as:
        // if (condition) while (true);
        BasicBlock gotoTarget = nextI.getBranchTarget();
        Instruction gotoLabel = gotoTarget.firstInstruction();
        Instruction gotoInst = firstRealInstructionFollowing(gotoLabel);

        if (gotoInst != nextI) {
          // replicate Goto
          cb.insertAfter(nextI.copyWithoutLinks());
          bb.recomputeNormalOut(ir); // fix the CFG
          return true;
        }
      }
    }
    // attempt to generate boolean compare.
    if (generateBooleanCompare(ir, bb, cb, targetBlock)) {
      // generateBooleanCompare does all necessary CFG fixup.
      return true;
    }
    // attempt to generate a sequence using conditional moves
    if (generateCondMove(ir, bb, cb)) {
      // generateCondMove does all necessary CFG fixup.
      return true;
    }

    // do we fall through to a block that has only a goto?
    BasicBlock fallThrough = bb.getFallThroughBlock();
    if (fallThrough != null) {
      Instruction fallThroughInstruction = fallThrough.firstRealInstruction();
      if ((fallThroughInstruction != null) && Goto.conforms(fallThroughInstruction)) {
        // copy goto to bb
        bb.appendInstruction(fallThroughInstruction.copyWithoutLinks());
        bb.recomputeNormalOut(ir);
      }
    }

    if (Goto.conforms(targetInst)) {
      // conditional branch to unconditional branch.
      // change conditional branch target to latter's target
      Instruction target2 =
          firstRealInstructionFollowing(targetInst.getBranchTarget().firstInstruction());
      if (target2 == targetInst) {
        // Avoid an infinite recursion in the following scenario:
        // g: if (...) goto L
        // ...
        // L: goto L
        // This happens in GCUtil in some systems due to a while(true) {}
        return false;
      }
      IfCmp.setTarget(cb, (BranchOperand) Goto.getTarget(targetInst).copy());
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    if (targetBlock.isEmpty()) {
      // branch to an empty block.  Change target to the next block.
      BasicBlock nextBlock = targetBlock.getFallThroughBlock();
      IfCmp.setTarget(cb, nextBlock.makeJumpTarget());
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    if (isFlipCandidate(cb, targetInst)) {
      flipConditionalBranch(cb);
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    return false;
  }
  /**
   * Perform optimizations for a Goto.
   *
   * <p> Patterns:
   * <pre>
   *    1)      GOTO A       replaced by  GOTO B
   *         A: GOTO B
   *
   *    2)      GOTO A       replaced by  IF .. GOTO B
   *         A: IF .. GOTO B              GOTO C
   *         C: ...
   *    3)   GOTO next instruction eliminated
   *    4)      GOTO A       replaced by  GOTO B
   *         A: LABEL
   *            BBEND
   *         B:
   *    5)   GOTO BBn where BBn has exactly one in edge
   *         - move BBn immediately after the GOTO in the code order,
   *           so that pattern 3) will create a fallthrough
   * </pre>
   *
   * <p> Precondition: Goto.conforms(g)
   *
   * @param ir governing IR
   * @param g the instruction to optimize
   * @param bb the basic block holding g
   * @return {@code true} iff a transformation was made
   */
  private boolean processGoto(IR ir, Instruction g, BasicBlock bb) {
    BasicBlock targetBlock = g.getBranchTarget();

    // don't optimize jumps to a code motion landing pad
    if (targetBlock.getLandingPad()) return false;

    Instruction targetLabel = targetBlock.firstInstruction();
    // get the first real instruction at the goto's target
    // NOTE: this instruction is not necessarily in targetBlock
    // (e.g. when targetBlock has no real instructions)
    Instruction targetInst = firstRealInstructionFollowing(targetLabel);
    if (targetInst == null || targetInst == g) {
      return false;
    }
    Instruction nextLabel = firstLabelFollowing(g);
    if (targetLabel == nextLabel) {
      // found a GOTO to the next instruction.  just remove it.
      g.remove();
      return true;
    }
    if (Goto.conforms(targetInst)) {
      // unconditional branch to unconditional branch.
      // replace g with goto to targetInst's target
      Instruction target2 =
          firstRealInstructionFollowing(targetInst.getBranchTarget().firstInstruction());
      if (target2 == targetInst) {
        // Avoid an infinite recursion in the following bizarre scenario:
        // g: goto L
        // ...
        // L: goto L
        // This happens in jByteMark.EmFloatPnt.denormalize() due to a while(true) {}
        return false;
      }
      Goto.setTarget(g, (BranchOperand) Goto.getTarget(targetInst).copy());
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    if (targetBlock.isEmpty()) {
      // GOTO an empty basic block.  Change target to the
      // next block.
      BasicBlock nextBlock = targetBlock.getFallThroughBlock();
      Goto.setTarget(g, nextBlock.makeJumpTarget());
      bb.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    if (mayDuplicateCondBranches && IfCmp.conforms(targetInst)) {
      // unconditional branch to a conditional branch.
      // If the Goto is the only branch instruction in its basic block
      // and the IfCmp is the only non-GOTO branch instruction
      // in its basic block then replace the goto with a copy of
      // targetInst and append another GOTO to the not-taken
      // target of targetInst's block.
      // We impose these additional restrictions to avoid getting
      // multiple conditional branches in a single basic block.
      if (!g.prevInstructionInCodeOrder().isBranch()
          && (targetInst.nextInstructionInCodeOrder().operator() == BBEND
              || targetInst.nextInstructionInCodeOrder().operator() == GOTO)) {
        Instruction copy = targetInst.copyWithoutLinks();
        g.replace(copy);
        Instruction newGoto = targetInst.getBasicBlock().getNotTakenNextBlock().makeGOTO();
        copy.insertAfter(newGoto);
        bb.recomputeNormalOut(ir); // fix the CFG
        return true;
      }
    }

    // try to create a fallthrough
    if (mayReorderCode && targetBlock.getNumberOfIn() == 1) {
      BasicBlock ftBlock = targetBlock.getFallThroughBlock();
      if (ftBlock != null) {
        BranchOperand ftTarget = ftBlock.makeJumpTarget();
        targetBlock.appendInstruction(CPOS(g, Goto.create(GOTO, ftTarget)));
      }

      ir.cfg.removeFromCodeOrder(targetBlock);
      ir.cfg.insertAfterInCodeOrder(bb, targetBlock);
      targetBlock.recomputeNormalOut(ir); // fix the CFG
      return true;
    }
    return false;
  }
  /**
   * @param ir the IR to expand
   * @return the return value, which is garbage (unused) on IA32
   */
  public static int expand(IR ir) {
    PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asIA32();
    MachineCodeOffsets mcOffsets = ir.MIRInfo.mcOffsets;

    for (Instruction next, p = ir.firstInstructionInCodeOrder(); p != null; p = next) {
      next = p.nextInstructionInCodeOrder();
      mcOffsets.setMachineCodeOffset(p, -1);

      switch (p.getOpcode()) {
        case IA32_MOVAPS_opcode:
          // a reg-reg move turned into a memory move where we can't guarantee alignment
          if (MIR_Move.getResult(p).isMemory() || MIR_Move.getValue(p).isMemory()) {
            MIR_Move.mutate(p, IA32_MOVSS, MIR_Move.getClearResult(p), MIR_Move.getClearValue(p));
          }
          break;

        case IA32_MOVAPD_opcode:
          // a reg-reg move turned into a memory move where we can't guarantee alignment
          if (MIR_Move.getResult(p).isMemory() || MIR_Move.getValue(p).isMemory()) {
            MIR_Move.mutate(p, IA32_MOVSD, MIR_Move.getClearResult(p), MIR_Move.getClearValue(p));
          }
          break;

        case IA32_TEST_opcode:
          // don't bother telling rest of compiler that memory operand
          // must be first; we can just commute it here.
          if (MIR_Test.getVal2(p).isMemory()) {
            Operand tmp = MIR_Test.getClearVal1(p);
            MIR_Test.setVal1(p, MIR_Test.getClearVal2(p));
            MIR_Test.setVal2(p, tmp);
          }
          break;

        case NULL_CHECK_opcode:
          {
            // mutate this into a TRAPIF, and then fall through to the
            // IA32_TRAPIF case.
            Operand ref = NullCheck.getRef(p);
            MIR_TrapIf.mutate(
                p,
                IA32_TRAPIF,
                null,
                ref.copy(),
                IC(0),
                IA32ConditionOperand.EQ(),
                TrapCodeOperand.NullPtr());
          }
          // There is no break statement here on purpose!
        case IA32_TRAPIF_opcode:
          {
            // split the basic block right before the IA32_TRAPIF
            BasicBlock thisBlock = p.getBasicBlock();
            BasicBlock trap = thisBlock.createSubBlock(p.getBytecodeIndex(), ir, 0f);
            thisBlock.insertOut(trap);
            BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(p, ir);
            thisBlock.insertOut(trap);
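            // (splitNodeWithLinksAt moves thisBlock's out edges over to
            // nextBlock, so the edge to the trap block is re-inserted here)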
            TrapCodeOperand tc = MIR_TrapIf.getClearTrapCode(p);
            p.remove();
            mcOffsets.setMachineCodeOffset(nextBlock.firstInstruction(), -1);
            // add code to thisBlock to conditionally jump to trap
            Instruction cmp =
                MIR_Compare.create(IA32_CMP, MIR_TrapIf.getVal1(p), MIR_TrapIf.getVal2(p));
            if (p.isMarkedAsPEI()) {
              // The trap if was explicitly marked, which means that it has
              // a memory operand into which we've folded a null check.
              // Actually need a GC map for both the compare and the INT.
              cmp.markAsPEI();
              cmp.copyPosition(p);
              ir.MIRInfo.gcIRMap.insertTwin(p, cmp);
            }
            thisBlock.appendInstruction(cmp);
            thisBlock.appendInstruction(
                MIR_CondBranch.create(
                    IA32_JCC, MIR_TrapIf.getCond(p), trap.makeJumpTarget(), null));

            // add block at end to hold trap instruction, and
            // insert trap sequence
            ir.cfg.addLastInCodeOrder(trap);
            if (tc.isArrayBounds()) {
              // attempt to store index expression in processor object for
              // C trap handler
              Operand index = MIR_TrapIf.getVal2(p);
              if (!(index instanceof RegisterOperand || index instanceof IntConstantOperand)) {
                // the index was spilled, and we can't get it back here
                index = IC(0xdeadbeef);
              }
              MemoryOperand mo =
                  MemoryOperand.BD(
                      ir.regpool.makeTROp(),
                      ArchEntrypoints.arrayIndexTrapParamField.getOffset(),
                      (byte) 4,
                      null,
                      null);
              trap.appendInstruction(MIR_Move.create(IA32_MOV, mo, index.copy()));
            }
            // NOTE: must make p the trap instruction: it is the GC point!
            // IMPORTANT: must also inform the GCMap that the instruction has
            // been moved!!!
            trap.appendInstruction(MIR_Trap.mutate(p, IA32_INT, null, tc));
            ir.MIRInfo.gcIRMap.moveToEnd(p);

            if (tc.isStackOverflow()) {
              // only stackoverflow traps resume at next instruction.
              trap.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget()));
            }
          }
          break;

        case IA32_FMOV_ENDING_LIVE_RANGE_opcode:
          {
            Operand result = MIR_Move.getResult(p);
            Operand value = MIR_Move.getValue(p);
            if (result.isRegister() && value.isRegister()) {
              if (result.similar(value)) {
                // eliminate useless move
                p.remove();
              } else {
                int i = PhysicalRegisterSet.getFPRIndex(result.asRegister().getRegister());
                int j = PhysicalRegisterSet.getFPRIndex(value.asRegister().getRegister());
                if (i == 0) {
                  MIR_XChng.mutate(p, IA32_FXCH, result, value);
                } else if (j == 0) {
                  MIR_XChng.mutate(p, IA32_FXCH, value, result);
                } else {
                  expandFmov(p, phys);
                }
              }
            } else {
              expandFmov(p, phys);
            }
            break;
          }

        case DUMMY_DEF_opcode:
        case DUMMY_USE_opcode:
        case REQUIRE_ESP_opcode:
        case ADVISE_ESP_opcode:
          p.remove();
          break;

        case IA32_FMOV_opcode:
          expandFmov(p, phys);
          break;

        case IA32_MOV_opcode:
          // Replace result = IA32_MOV 0 with result = IA32_XOR result, result
          if (MIR_Move.getResult(p).isRegister()
              && MIR_Move.getValue(p).isIntConstant()
              && MIR_Move.getValue(p).asIntConstant().value == 0) {
            // Calculate what flags are defined in coming instructions before a use of a flag or
            // BBend
            Instruction x = next;
            int futureDefs = 0;
            while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
              futureDefs |= x.operator().implicitDefs;
              x = x.nextInstructionInCodeOrder();
            }
            // If the flags will be destroyed prior to use or we reached the end of the basic block
            if (BBend.conforms(x)
                || (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF)
                    == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) {
              Operand result = MIR_Move.getClearResult(p);
              MIR_BinaryAcc.mutate(p, IA32_XOR, result, result.copy());
            }
          }
          break;

        case IA32_SET__B_opcode:
          // Replace <cmp>, set__b, movzx__b with xor, <cmp>, set__b
          if (MIR_Set.getResult(p).isRegister()
              && MIR_Unary.conforms(next)
              && (next.operator() == IA32_MOVZX__B)
              && MIR_Unary.getResult(next).isRegister()
              && MIR_Unary.getVal(next).similar(MIR_Unary.getResult(next))
              && MIR_Unary.getVal(next).similar(MIR_Set.getResult(p))) {
            // Find instruction in this basic block that defines flags
            Instruction x = p.prevInstructionInCodeOrder();
            Operand result = MIR_Unary.getResult(next);
            boolean foundCmp = false;
            outer:
            while (!Label.conforms(x)) {
              Enumeration<Operand> e = x.getUses();
              while (e.hasMoreElements()) {
                // We can't use an xor to clear the register if that register is
                // used by the <cmp> or intervening instruction
                if (e.nextElement().similar(result)) {
                  break outer;
                }
              }
              if (PhysicalDefUse.definesEFLAGS(x.operator())
                  && !PhysicalDefUse.usesEFLAGS(x.operator())) {
                // we found a <cmp> that doesn't use the result or the flags
                // that would be clobbered by the xor
                foundCmp = true;
                break outer;
              }
              x = x.prevInstructionInCodeOrder();
            }
            if (foundCmp) {
              // We found the <cmp>, mutate the movzx__b into an xor and insert it before the <cmp>
              next.remove();
              MIR_BinaryAcc.mutate(next, IA32_XOR, result, MIR_Unary.getVal(next));
              x.insertBefore(next);
              // get ready for the next instruction
              next = p.nextInstructionInCodeOrder();
            }
          }
          break;

        case IA32_LEA_opcode:
          {
            // Sometimes we're overeager in BURS in using LEAs; after register
            // allocation we can often simplify them to the accumulate form:
            // replace reg1 = LEA [reg1 + reg2] with reg1 = reg1 + reg2
            // replace reg1 = LEA [reg1 + c1] with reg1 = reg1 + c1
            // replace reg1 = LEA [reg1 << c1] with reg1 = reg1 << c1
            MemoryOperand value = MIR_Lea.getValue(p);
            RegisterOperand result = MIR_Lea.getResult(p);
            if ((value.base != null && value.base.getRegister() == result.getRegister())
                || (value.index != null && value.index.getRegister() == result.getRegister())) {
              // Calculate what flags are defined in coming instructions before a use of a flag or
              // BBend
              Instruction x = next;
              int futureDefs = 0;
              while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
                futureDefs |= x.operator().implicitDefs;
                x = x.nextInstructionInCodeOrder();
              }
              // If the flags will be destroyed prior to use or we reached the end of the basic
              // block
              if (BBend.conforms(x)
                  || (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF)
                      == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) {
                if (value.base != null
                    && value.index != null
                    && value.index.getRegister() == result.getRegister()
                    && value.disp.isZero()
                    && value.scale == 0) {
                  // reg1 = lea [base + reg1] -> add reg1, base
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.base);
                } else if (value.base != null
                    && value.base.getRegister() == result.getRegister()
                    && value.index != null
                    && value.disp.isZero()
                    && value.scale == 0) {
                  // reg1 = lea [reg1 + index] -> add reg1, index
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.index);
                } else if (value.base != null
                    && value.base.getRegister() == result.getRegister()
                    && value.index == null) {
                  if (VM.VerifyAssertions) VM._assert(fits(value.disp, 32));
                  // reg1 = lea [reg1 + disp] -> add reg1, disp
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, IC(value.disp.toInt()));
                } else if (value.base == null
                    && value.index != null
                    && value.index.getRegister() == result.getRegister()
                    && value.scale == 0) {
                  if (VM.VerifyAssertions) VM._assert(fits(value.disp, 32));
                  // reg1 = lea [reg1 + disp] -> add reg1, disp
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, IC(value.disp.toInt()));
                } else if (value.base == null
                    && value.index != null
                    && value.index.getRegister() == result.getRegister()
                    && value.disp.isZero()) {
                  // reg1 = lea [reg1 << scale] -> shl reg1, scale
                  if (value.scale == 0) {
                    p.remove();
                  } else if (value.scale == 1) {
                    MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.index);
                  } else {
                    MIR_BinaryAcc.mutate(p, IA32_SHL, result, IC(value.scale));
                  }
                }
              }
            }
          }
          break;

        case IA32_FCLEAR_opcode:
          expandFClear(p, ir);
          break;

        case IA32_JCC2_opcode:
          p.insertBefore(
              MIR_CondBranch.create(
                  IA32_JCC,
                  MIR_CondBranch2.getCond1(p),
                  MIR_CondBranch2.getTarget1(p),
                  MIR_CondBranch2.getBranchProfile1(p)));
          MIR_CondBranch.mutate(
              p,
              IA32_JCC,
              MIR_CondBranch2.getCond2(p),
              MIR_CondBranch2.getTarget2(p),
              MIR_CondBranch2.getBranchProfile2(p));
          break;

        case CALL_SAVE_VOLATILE_opcode:
          p.changeOperatorTo(IA32_CALL);
          break;

        case IA32_LOCK_CMPXCHG_opcode:
          p.insertBefore(MIR_Empty.create(IA32_LOCK));
          p.changeOperatorTo(IA32_CMPXCHG);
          break;

        case IA32_LOCK_CMPXCHG8B_opcode:
          p.insertBefore(MIR_Empty.create(IA32_LOCK));
          p.changeOperatorTo(IA32_CMPXCHG8B);
          break;

        case YIELDPOINT_PROLOGUE_opcode:
          expandYieldpoint(
              p, ir, Entrypoints.optThreadSwitchFromPrologueMethod, IA32ConditionOperand.NE());
          break;

        case YIELDPOINT_EPILOGUE_opcode:
          expandYieldpoint(
              p, ir, Entrypoints.optThreadSwitchFromEpilogueMethod, IA32ConditionOperand.NE());
          break;

        case YIELDPOINT_BACKEDGE_opcode:
          expandYieldpoint(
              p, ir, Entrypoints.optThreadSwitchFromBackedgeMethod, IA32ConditionOperand.GT());
          break;

        case YIELDPOINT_OSR_opcode:
          // must yield, does not check threadSwitch request
          expandUnconditionalYieldpoint(p, ir, Entrypoints.optThreadSwitchFromOsrOptMethod);
          break;
      }
    }
    return 0;
  }