/**
  * Update the value graph to account for a given NullCheck instruction.
  *
  * <p><b>PRECONDITION:</b> <code>NullCheck.conforms(s)</code>
  *
  * @param s the instruction in question
  */
 private void processNullCheck(Instruction s) {
   // label the vertex corresponding to the result with the operator
   RegisterOperand result = NullCheck.getGuardResult(s);
   ValueGraphVertex v = findOrCreateVertex(result.getRegister());
    v.setLabel(s.operator(), 1); // operator label, one operand
   // link node v to the operand it uses
   Operand val = NullCheck.getRef(s);
   // bypass Move instructions
   val = bypassMoves(val);
   link(v, findOrCreateVertex(val), 0);
 }
 /**
  * Update the value graph to account for a given instruction.
  *
  * @param s the instruction in question
  */
 private void processInstruction(Instruction s) {
   // TODO: support all necessary types of instructions
   if (s.isDynamicLinkingPoint()) {
     processCall(s);
   } else if (Move.conforms(s)) {
     processMove(s);
    } else if (s.operator() == PI) {
     processPi(s);
   } else if (New.conforms(s)) {
     processNew(s);
   } else if (NewArray.conforms(s)) {
     processNewArray(s);
   } else if (Unary.conforms(s)) {
     processUnary(s);
   } else if (GuardedUnary.conforms(s)) {
     processGuardedUnary(s);
   } else if (NullCheck.conforms(s)) {
     processNullCheck(s);
   } else if (ZeroCheck.conforms(s)) {
     processZeroCheck(s);
   } else if (Binary.conforms(s)) {
     processBinary(s);
   } else if (GuardedBinary.conforms(s)) {
     processGuardedBinary(s);
   } else if (InlineGuard.conforms(s)) {
     processInlineGuard(s);
   } else if (IfCmp.conforms(s)) {
     processIfCmp(s);
   } else if (Call.conforms(s)) {
     processCall(s);
   } else if (MonitorOp.conforms(s)) {
     processCall(s);
   } else if (Prepare.conforms(s)) {
     processCall(s);
   } else if (Attempt.conforms(s)) {
     processCall(s);
   } else if (CacheOp.conforms(s)) {
     processCall(s);
   } else if (ALoad.conforms(s)) {
     processALoad(s);
   } else if (PutField.conforms(s)) {
     processPutField(s);
   } else if (PutStatic.conforms(s)) {
     processPutStatic(s);
   } else if (AStore.conforms(s)) {
     processAStore(s);
   } else if (Phi.conforms(s)) {
     processPhi(s);
   } else if (s.operator() == IR_PROLOGUE) {
     processPrologue(s);
   }
 }
   /**
    * Expand MIR pseudo-instructions into their final IA32-specific form.
    *
    * @param ir the IR to expand
    * @return 0; the return value is not meaningful on IA32
    */
  public static int expand(IR ir) {
    PhysicalRegisterSet phys = ir.regpool.getPhysicalRegisterSet().asIA32();
    MachineCodeOffsets mcOffsets = ir.MIRInfo.mcOffsets;

    for (Instruction next, p = ir.firstInstructionInCodeOrder(); p != null; p = next) {
      next = p.nextInstructionInCodeOrder();
      mcOffsets.setMachineCodeOffset(p, -1);

      switch (p.getOpcode()) {
        case IA32_MOVAPS_opcode:
          // a reg-reg move turned into a memory move where we can't guarantee alignment
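          // (movaps faults on unaligned memory operands; movss has no
          // alignment requirement)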
          if (MIR_Move.getResult(p).isMemory() || MIR_Move.getValue(p).isMemory()) {
            MIR_Move.mutate(p, IA32_MOVSS, MIR_Move.getClearResult(p), MIR_Move.getClearValue(p));
          }
          break;

        case IA32_MOVAPD_opcode:
          // a reg-reg move turned into a memory move where we can't guarantee alignment
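          // (likewise, movapd requires 16-byte alignment; movsd does not)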
          if (MIR_Move.getResult(p).isMemory() || MIR_Move.getValue(p).isMemory()) {
            MIR_Move.mutate(p, IA32_MOVSD, MIR_Move.getClearResult(p), MIR_Move.getClearValue(p));
          }
          break;

        case IA32_TEST_opcode:
          // Don't bother telling the rest of the compiler that the memory
          // operand must be first; we can simply commute it here.
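          // TEST computes val1 AND val2, which is commutative, so e.g.
          // "test [mem], eax" can become "test eax, [mem]".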
          if (MIR_Test.getVal2(p).isMemory()) {
            Operand tmp = MIR_Test.getClearVal1(p);
            MIR_Test.setVal1(p, MIR_Test.getClearVal2(p));
            MIR_Test.setVal2(p, tmp);
          }
          break;

        case NULL_CHECK_opcode:
          {
            // mutate this into a TRAPIF, and then fall through to the
            // IA32_TRAPIF case.
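            // The resulting "trapif ref, 0, EQ, NullPtr" traps exactly when
            // the reference is null (i.e. equal to 0).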
            Operand ref = NullCheck.getRef(p);
            MIR_TrapIf.mutate(
                p,
                IA32_TRAPIF,
                null,
                ref.copy(),
                IC(0),
                IA32ConditionOperand.EQ(),
                TrapCodeOperand.NullPtr());
          }
          // There is no break statement here on purpose!
        case IA32_TRAPIF_opcode:
          {
            // split the basic block right before the IA32_TRAPIF
            BasicBlock thisBlock = p.getBasicBlock();
            BasicBlock trap = thisBlock.createSubBlock(p.getBytecodeIndex(), ir, 0f);
            thisBlock.insertOut(trap);
            BasicBlock nextBlock = thisBlock.splitNodeWithLinksAt(p, ir);
            thisBlock.insertOut(trap);
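            // (splitNodeWithLinksAt moved this block's out edges to nextBlock,
            // so the edge from thisBlock to the trap block is re-inserted)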
            TrapCodeOperand tc = MIR_TrapIf.getClearTrapCode(p);
            p.remove();
            mcOffsets.setMachineCodeOffset(nextBlock.firstInstruction(), -1);
            // add code to thisBlock to conditionally jump to trap
            Instruction cmp =
                MIR_Compare.create(IA32_CMP, MIR_TrapIf.getVal1(p), MIR_TrapIf.getVal2(p));
            if (p.isMarkedAsPEI()) {
              // The trap if was explicitly marked, which means that it has
              // a memory operand into which we've folded a null check.
              // We actually need a GC map for both the compare and the INT.
              cmp.markAsPEI();
              cmp.copyPosition(p);
              ir.MIRInfo.gcIRMap.insertTwin(p, cmp);
            }
            thisBlock.appendInstruction(cmp);
            thisBlock.appendInstruction(
                MIR_CondBranch.create(
                    IA32_JCC, MIR_TrapIf.getCond(p), trap.makeJumpTarget(), null));

            // add block at end to hold trap instruction, and
            // insert trap sequence
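            // Resulting code layout:
            //   thisBlock: ... ; cmp val1, val2 ; jcc <cond> -> trap
            //   nextBlock: (fall-through continuation)
            //   ...
            //   trap:      [store index for array-bounds traps] ; int <trap code>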
            ir.cfg.addLastInCodeOrder(trap);
            if (tc.isArrayBounds()) {
              // attempt to store index expression in processor object for
              // C trap handler
              Operand index = MIR_TrapIf.getVal2(p);
              if (!(index instanceof RegisterOperand || index instanceof IntConstantOperand)) {
                // the index was spilled, so we can't recover it here;
                // store a recognizable sentinel value instead
                index = IC(0xdeadbeef);
              }
              MemoryOperand mo =
                  MemoryOperand.BD(
                      ir.regpool.makeTROp(),
                      ArchEntrypoints.arrayIndexTrapParamField.getOffset(),
                      (byte) 4,
                      null,
                      null);
              trap.appendInstruction(MIR_Move.create(IA32_MOV, mo, index.copy()));
            }
            // NOTE: must make p the trap instruction: it is the GC point!
            // IMPORTANT: must also inform the GCMap that the instruction has
            // been moved!!!
            trap.appendInstruction(MIR_Trap.mutate(p, IA32_INT, null, tc));
            ir.MIRInfo.gcIRMap.moveToEnd(p);

            if (tc.isStackOverflow()) {
              // only stackoverflow traps resume at next instruction.
              trap.appendInstruction(MIR_Branch.create(IA32_JMP, nextBlock.makeJumpTarget()));
            }
          }
          break;

        case IA32_FMOV_ENDING_LIVE_RANGE_opcode:
          {
            Operand result = MIR_Move.getResult(p);
            Operand value = MIR_Move.getValue(p);
            if (result.isRegister() && value.isRegister()) {
              if (result.similar(value)) {
                // eliminate useless move
                p.remove();
              } else {
                int i = PhysicalRegisterSet.getFPRIndex(result.asRegister().getRegister());
                int j = PhysicalRegisterSet.getFPRIndex(value.asRegister().getRegister());
                if (i == 0) {
                  MIR_XChng.mutate(p, IA32_FXCH, result, value);
                } else if (j == 0) {
                  MIR_XChng.mutate(p, IA32_FXCH, value, result);
                } else {
                  expandFmov(p, phys);
                }
              }
            } else {
              expandFmov(p, phys);
            }
            break;
          }

        case DUMMY_DEF_opcode:
        case DUMMY_USE_opcode:
        case REQUIRE_ESP_opcode:
        case ADVISE_ESP_opcode:
          p.remove();
          break;

        case IA32_FMOV_opcode:
          expandFmov(p, phys);
          break;

        case IA32_MOV_opcode:
          // Replace result = IA32_MOV 0 with result = IA32_XOR result, result
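          // (xor reg,reg is shorter than mov reg,0 but, unlike mov, writes
          // EFLAGS, so the rewrite is only legal when the flags are dead;
          // the scan below checks this)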
          if (MIR_Move.getResult(p).isRegister()
              && MIR_Move.getValue(p).isIntConstant()
              && MIR_Move.getValue(p).asIntConstant().value == 0) {
            // Scan forward to see which flags are defined before the next use
            // of EFLAGS or the end of the basic block
            Instruction x = next;
            int futureDefs = 0;
            while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
              futureDefs |= x.operator().implicitDefs;
              x = x.nextInstructionInCodeOrder();
            }
            // If the flags will be destroyed prior to use or we reached the end of the basic block
            if (BBend.conforms(x)
                || (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF)
                    == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) {
              Operand result = MIR_Move.getClearResult(p);
              MIR_BinaryAcc.mutate(p, IA32_XOR, result, result.copy());
            }
          }
          break;

        case IA32_SET__B_opcode:
          // Replace <cmp>, set__b, movzx__b with xor, <cmp>, set__b
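          // i.e. rewrite
          //   cmp ... ; set<cc> reg8 ; movzx reg32, reg8
          // as
          //   xor reg32, reg32 ; cmp ... ; set<cc> reg8
          // (the xor is hoisted above the cmp because it clobbers EFLAGS)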
          if (MIR_Set.getResult(p).isRegister()
              && MIR_Unary.conforms(next)
              && (next.operator() == IA32_MOVZX__B)
              && MIR_Unary.getResult(next).isRegister()
              && MIR_Unary.getVal(next).similar(MIR_Unary.getResult(next))
              && MIR_Unary.getVal(next).similar(MIR_Set.getResult(p))) {
            // Find instruction in this basic block that defines flags
            Instruction x = p.prevInstructionInCodeOrder();
            Operand result = MIR_Unary.getResult(next);
            boolean foundCmp = false;
            outer:
            while (!Label.conforms(x)) {
              Enumeration<Operand> e = x.getUses();
              while (e.hasMoreElements()) {
                // We can't use an xor to clear the register if that register is
                // used by the <cmp> or intervening instruction
                if (e.nextElement().similar(result)) {
                  break outer;
                }
              }
              if (PhysicalDefUse.definesEFLAGS(x.operator())
                  && !PhysicalDefUse.usesEFLAGS(x.operator())) {
                // we found a <cmp> that doesn't use the result or the flags
                // that would be clobbered by the xor
                foundCmp = true;
                break outer;
              }
              x = x.prevInstructionInCodeOrder();
            }
            if (foundCmp) {
              // We found the <cmp>; mutate the movzx__b into an xor and insert it before the <cmp>
              next.remove();
              MIR_BinaryAcc.mutate(next, IA32_XOR, result, MIR_Unary.getVal(next));
              x.insertBefore(next);
              // get ready for the next instruction
              next = p.nextInstructionInCodeOrder();
            }
          }
          break;

        case IA32_LEA_opcode:
          {
            // BURS is sometimes overeager in using LEAs; after register
            // allocation we can simplify them to the accumulate form:
            // replace reg1 = LEA [reg1 + reg2] with reg1 = reg1 + reg2
            // replace reg1 = LEA [reg1 + c1] with reg1 = reg1 + c1
            // replace reg1 = LEA [reg1 << c1] with reg1 = reg1 << c1
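            // (add and shl write EFLAGS while lea does not, so each rewrite
            // below is guarded by a check that the flags are dead)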
            MemoryOperand value = MIR_Lea.getValue(p);
            RegisterOperand result = MIR_Lea.getResult(p);
            if ((value.base != null && value.base.getRegister() == result.getRegister())
                || (value.index != null && value.index.getRegister() == result.getRegister())) {
              // Scan forward to see which flags are defined before the next
              // use of EFLAGS or the end of the basic block
              Instruction x = next;
              int futureDefs = 0;
              while (!BBend.conforms(x) && !PhysicalDefUse.usesEFLAGS(x.operator())) {
                futureDefs |= x.operator().implicitDefs;
                x = x.nextInstructionInCodeOrder();
              }
              // If the flags will be destroyed prior to use or we reached the end of the basic
              // block
              if (BBend.conforms(x)
                  || (futureDefs & PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF)
                      == PhysicalDefUse.maskAF_CF_OF_PF_SF_ZF) {
                if (value.base != null
                    && value.index != null
                    && value.index.getRegister() == result.getRegister()
                    && value.disp.isZero()
                    && value.scale == 0) {
                  // reg1 = lea [base + reg1] -> add reg1, base
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.base);
                } else if (value.base != null
                    && value.base.getRegister() == result.getRegister()
                    && value.index != null
                    && value.disp.isZero()
                    && value.scale == 0) {
                  // reg1 = lea [reg1 + index] -> add reg1, index
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.index);
                } else if (value.base != null
                    && value.base.getRegister() == result.getRegister()
                    && value.index == null) {
                  if (VM.VerifyAssertions) VM._assert(fits(value.disp, 32));
                  // reg1 = lea [reg1 + disp] -> add reg1, disp
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, IC(value.disp.toInt()));
                } else if (value.base == null
                    && value.index != null
                    && value.index.getRegister() == result.getRegister()
                    && value.scale == 0) {
                  if (VM.VerifyAssertions) VM._assert(fits(value.disp, 32));
                  // reg1 = lea [reg1 + disp] -> add reg1, disp
                  MIR_BinaryAcc.mutate(p, IA32_ADD, result, IC(value.disp.toInt()));
                } else if (value.base == null
                    && value.index != null
                    && value.index.getRegister() == result.getRegister()
                    && value.disp.isZero()) {
                  // reg1 = lea [reg1 << scale] -> shl reg1, scale
                  if (value.scale == 0) {
                    // a shift by 0 makes the lea a no-op
                    p.remove();
                  } else if (value.scale == 1) {
                    // reg1 << 1 == reg1 + reg1
                    MIR_BinaryAcc.mutate(p, IA32_ADD, result, value.index);
                  } else {
                    MIR_BinaryAcc.mutate(p, IA32_SHL, result, IC(value.scale));
                  }
                }
              }
            }
          }
          break;

        case IA32_FCLEAR_opcode:
          expandFClear(p, ir);
          break;

        case IA32_JCC2_opcode:
          p.insertBefore(
              MIR_CondBranch.create(
                  IA32_JCC,
                  MIR_CondBranch2.getCond1(p),
                  MIR_CondBranch2.getTarget1(p),
                  MIR_CondBranch2.getBranchProfile1(p)));
          MIR_CondBranch.mutate(
              p,
              IA32_JCC,
              MIR_CondBranch2.getCond2(p),
              MIR_CondBranch2.getTarget2(p),
              MIR_CondBranch2.getBranchProfile2(p));
          break;

        case CALL_SAVE_VOLATILE_opcode:
          p.changeOperatorTo(IA32_CALL);
          break;

        case IA32_LOCK_CMPXCHG_opcode:
          p.insertBefore(MIR_Empty.create(IA32_LOCK));
          p.changeOperatorTo(IA32_CMPXCHG);
          break;

        case IA32_LOCK_CMPXCHG8B_opcode:
          p.insertBefore(MIR_Empty.create(IA32_LOCK));
          p.changeOperatorTo(IA32_CMPXCHG8B);
          break;

        case YIELDPOINT_PROLOGUE_opcode:
          expandYieldpoint(
              p, ir, Entrypoints.optThreadSwitchFromPrologueMethod, IA32ConditionOperand.NE());
          break;

        case YIELDPOINT_EPILOGUE_opcode:
          expandYieldpoint(
              p, ir, Entrypoints.optThreadSwitchFromEpilogueMethod, IA32ConditionOperand.NE());
          break;

        case YIELDPOINT_BACKEDGE_opcode:
          expandYieldpoint(
              p, ir, Entrypoints.optThreadSwitchFromBackedgeMethod, IA32ConditionOperand.GT());
          break;

        case YIELDPOINT_OSR_opcode:
          // must yield, does not check threadSwitch request
          expandUnconditionalYieldpoint(p, ir, Entrypoints.optThreadSwitchFromOsrOptMethod);
          break;
      }
    }
    return 0;
  }