[llvm] 4b44963 - AMDGPU: Start adding MODE register uses to instructions

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed May 27 11:47:11 PDT 2020


Author: Matt Arsenault
Date: 2020-05-27T14:47:00-04:00
New Revision: 4b4496312e3380d8c427ef836f2b0a38d145652b

URL: https://github.com/llvm/llvm-project/commit/4b4496312e3380d8c427ef836f2b0a38d145652b
DIFF: https://github.com/llvm/llvm-project/commit/4b4496312e3380d8c427ef836f2b0a38d145652b.diff

LOG: AMDGPU: Start adding MODE register uses to instructions

This is the groundwork required to implement strictfp. For now, this
should be NFC for regular instructions (many instructions just gain an
extra use of a reserved register). Regalloc won't rematerialize
instructions with reads of physical registers, but we were suffering
from that anyway with the exec reads.

Should add it for all the related FP uses (possibly with some
extras). I did not add it to either the gpr index mode instructions
(or every single VALU instruction) since it's a ridiculous feature
already modeled as an arbitrary side effect.

Also work towards marking instructions with FP exceptions. This
doesn't actually set the bit yet since this would start to change
codegen. It seems nofpexcept is currently not implied from the regular
IR FP operations. Add it to some MIR tests where I think it might
matter.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIInstrFormats.td
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/lib/Target/AMDGPU/SOPInstructions.td
    llvm/lib/Target/AMDGPU/VOP1Instructions.td
    llvm/lib/Target/AMDGPU/VOP2Instructions.td
    llvm/lib/Target/AMDGPU/VOP3Instructions.td
    llvm/lib/Target/AMDGPU/VOP3PInstructions.td
    llvm/lib/Target/AMDGPU/VOPCInstructions.td
    llvm/lib/Target/AMDGPU/VOPInstructions.td
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmad.ftz.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmad.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
    llvm/test/CodeGen/AMDGPU/bundle-latency.mir
    llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
    llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
    llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
    llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
    llvm/test/CodeGen/AMDGPU/coalescer-subregjoin-fullcopy.mir
    llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
    llvm/test/CodeGen/AMDGPU/couldnt-join-subrange-3.mir
    llvm/test/CodeGen/AMDGPU/dead-lane.mir
    llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir
    llvm/test/CodeGen/AMDGPU/dpp_combine.mir
    llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
    llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
    llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
    llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
    llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir
    llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
    llvm/test/CodeGen/AMDGPU/hazard-buffer-store-v-interp.mir
    llvm/test/CodeGen/AMDGPU/hazard-hidden-bundle.mir
    llvm/test/CodeGen/AMDGPU/hazard-in-bundle.mir
    llvm/test/CodeGen/AMDGPU/hazard-kill.mir
    llvm/test/CodeGen/AMDGPU/hazard.mir
    llvm/test/CodeGen/AMDGPU/insert-waitcnts-callee.mir
    llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
    llvm/test/CodeGen/AMDGPU/madak-inline-constant.mir
    llvm/test/CodeGen/AMDGPU/mai-hazards.mir
    llvm/test/CodeGen/AMDGPU/merge-m0.mir
    llvm/test/CodeGen/AMDGPU/mode-register.mir
    llvm/test/CodeGen/AMDGPU/movrels-bug.mir
    llvm/test/CodeGen/AMDGPU/omod-nsz-flag.mir
    llvm/test/CodeGen/AMDGPU/power-sched-no-instr-sunit.mir
    llvm/test/CodeGen/AMDGPU/regcoal-subrange-join-seg.mir
    llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
    llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
    llvm/test/CodeGen/AMDGPU/regcoalescing-remove-partial-redundancy-assert.mir
    llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
    llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir
    llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
    llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
    llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
    llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
    llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
    llvm/test/CodeGen/AMDGPU/shrink-instructions-flags.mir
    llvm/test/CodeGen/AMDGPU/smem-war-hazard.mir
    llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
    llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
    llvm/test/CodeGen/AMDGPU/twoaddr-fma.mir
    llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
    llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
    llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
    llvm/test/CodeGen/AMDGPU/vcmpx-permlane-hazard.mir
    llvm/test/CodeGen/AMDGPU/vmem-to-salu-hazard.mir
    llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
    llvm/test/CodeGen/AMDGPU/waitcnt-overflow.mir
    llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
    llvm/unittests/MI/LiveIntervalTest.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
index a4d11780118f..428c21c896d5 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
@@ -114,6 +114,9 @@ class InstSI <dag outs, dag ins, string asm = "",
   // FLAT_SCRATCH segment. Must be 0 for non-FLAT instructions.
   field bit IsNonFlatSeg = 0;
 
+  // Reads the mode register, usually for FP environment.
+  field bit ReadsModeReg = 0;
+
   // This bit indicates that this uses the floating point double precision
   // rounding mode flags
   field bit FPDPRounding = 0;

diff  --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 400bbe2c0ed4..fff2189498e1 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -64,7 +64,7 @@ def : ExpComprPattern<v2f16, EXP_DONE, -1>;
 // Used to inject printing of "_e32" suffix for VI (there are "_e64" variants for VI)
 def VINTRPDst : VINTRPDstOperand <VGPR_32>;
 
-let Uses = [M0, EXEC] in {
+let Uses = [MODE, M0, EXEC] in {
 
 // FIXME: Specify SchedRW for VINTRP instructions.
 
@@ -109,7 +109,7 @@ defm V_INTERP_MOV_F32 : VINTRP_m <
   [(set f32:$vdst, (int_amdgcn_interp_mov (i32 timm:$vsrc),
                    (i32 timm:$attrchan), (i32 timm:$attr), M0))]>;
 
-} // End Uses = [M0, EXEC]
+} // End Uses = [MODE, M0, EXEC]
 
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions

diff  --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index e9dbe93fa3ce..4f9aaa1bc604 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -807,8 +807,10 @@ def S_SETREG_B32 : SOPK_Pseudo <
   "s_setreg_b32",
   (outs), (ins SReg_32:$sdst, hwreg:$simm16),
   "$simm16, $sdst",
-  [(AMDGPUsetreg i32:$sdst, (i16 timm:$simm16))]
->;
+  [(AMDGPUsetreg i32:$sdst, (i16 timm:$simm16))]> {
+  let Defs = [MODE];
+  let Uses = [MODE];
+}
 
 // FIXME: Not on SI?
 //def S_GETREG_REGRD_B32 : SOPK_32 <sopk<0x14, 0x13>, "s_getreg_regrd_b32">;
@@ -819,6 +821,8 @@ def S_SETREG_IMM32_B32 : SOPK_Pseudo <
   "$simm16, $imm"> {
   let Size = 8; // Unlike every other SOPK instruction.
   let has_sdst = 0;
+  let Defs = [MODE];
+  let Uses = [MODE];
 }
 
 } // End hasSideEffects = 1
@@ -953,6 +957,10 @@ def S_CMP_LG_U64 : SOPC_CMP_64 <0x13, "s_cmp_lg_u64", COND_NE>;
 } // End SubtargetPredicate = isGFX8Plus
 
 let SubtargetPredicate = HasVGPRIndexMode in {
+// Setting the GPR index mode is really writing the fields in the mode
+// register. We don't want to add mode register uses to every
+// instruction, and it's too complicated to deal with anyway. This is
+// modeled just as a side effect.
 def S_SET_GPR_IDX_ON : SOPC <0x11,
   (outs),
   (ins SSrc_b32:$src0, GPRIdxMode:$src1),
@@ -1209,13 +1217,16 @@ let SubtargetPredicate = isGFX10Plus in {
     }
   def S_WAITCNT_DEPCTR :
     SOPP <0x023, (ins s16imm:$simm16), "s_waitcnt_depctr $simm16">;
-  def S_ROUND_MODE :
-    SOPP<0x024, (ins s16imm:$simm16), "s_round_mode $simm16">;
-  def S_DENORM_MODE :
-    SOPP<0x025, (ins i32imm:$simm16), "s_denorm_mode $simm16",
-    [(SIdenorm_mode (i32 timm:$simm16))]> {
-      let hasSideEffects = 1;
-    }
+
+  let hasSideEffects = 1, Uses = [MODE], Defs = [MODE] in {
+    // FIXME: Should remove hasSideEffects
+    def S_ROUND_MODE :
+      SOPP<0x024, (ins s16imm:$simm16), "s_round_mode $simm16">;
+    def S_DENORM_MODE :
+      SOPP<0x025, (ins i32imm:$simm16), "s_denorm_mode $simm16",
+      [(SIdenorm_mode (i32 timm:$simm16))]>;
+  }
+
   def S_TTRACEDATA_IMM :
     SOPP<0x028, (ins s16imm:$simm16), "s_ttracedata_imm $simm16">;
 } // End SubtargetPredicate = isGFX10Plus

diff  --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 0c2b5fbf59fc..e46d84d513cc 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -48,9 +48,14 @@ class VOP1_Pseudo <string opName, VOPProfile P, list<dag> pattern=[], bit VOP1On
   let mayStore = 0;
   let hasSideEffects = 0;
 
+  let ReadsModeReg = !or(isFloatType<P.DstVT>.ret, isFloatType<P.Src0VT>.ret);
+
+  // FIXME
+  // let mayRaiseFPException = ReadsModeReg;
+
   let VOP1 = 1;
   let VALU = 1;
-  let Uses = [EXEC];
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
 
   let AsmVariantName = AMDGPUAsmVariants.Default;
 }
@@ -186,31 +191,51 @@ def V_READFIRSTLANE_B32 :
 
 let SchedRW = [WriteDoubleCvt] in {
 defm V_CVT_I32_F64 : VOP1Inst <"v_cvt_i32_f64", VOP_I32_F64,  fp_to_sint>;
+
+let mayRaiseFPException = 0 in {
 defm V_CVT_F64_I32 : VOP1Inst <"v_cvt_f64_i32", VOP1_F64_I32, sint_to_fp>;
+}
+
 defm V_CVT_F32_F64 : VOP1Inst <"v_cvt_f32_f64", VOP_F32_F64,  fpround>;
 defm V_CVT_F64_F32 : VOP1Inst <"v_cvt_f64_f32", VOP_F64_F32,  fpextend>;
 defm V_CVT_U32_F64 : VOP1Inst <"v_cvt_u32_f64", VOP_I32_F64,  fp_to_uint>;
+
+let mayRaiseFPException = 0 in {
 defm V_CVT_F64_U32 : VOP1Inst <"v_cvt_f64_u32", VOP1_F64_I32, uint_to_fp>;
+}
+
 } // End SchedRW = [WriteDoubleCvt]
 
 let SchedRW = [WriteFloatCvt] in {
+
+// XXX: Does this really not raise exceptions? The manual claims the
+// 16-bit ones can.
+let mayRaiseFPException = 0 in {
 defm V_CVT_F32_I32 : VOP1Inst <"v_cvt_f32_i32", VOP1_F32_I32, sint_to_fp>;
 defm V_CVT_F32_U32 : VOP1Inst <"v_cvt_f32_u32", VOP1_F32_I32, uint_to_fp>;
+}
+
 defm V_CVT_U32_F32 : VOP1Inst <"v_cvt_u32_f32", VOP_I32_F32, fp_to_uint>;
 defm V_CVT_I32_F32 : VOP1Inst <"v_cvt_i32_f32", VOP_I32_F32, fp_to_sint>;
 let FPDPRounding = 1 in {
 defm V_CVT_F16_F32 : VOP1Inst <"v_cvt_f16_f32", VOP_F16_F32, fpround>;
 } // End FPDPRounding = 1
+
 defm V_CVT_F32_F16 : VOP1Inst <"v_cvt_f32_f16", VOP_F32_F16, fpextend>;
+
+let ReadsModeReg = 0, mayRaiseFPException = 0 in {
 defm V_CVT_RPI_I32_F32 : VOP1Inst <"v_cvt_rpi_i32_f32", VOP_I32_F32, cvt_rpi_i32_f32>;
 defm V_CVT_FLR_I32_F32 : VOP1Inst <"v_cvt_flr_i32_f32", VOP_I32_F32, cvt_flr_i32_f32>;
 defm V_CVT_OFF_F32_I4 : VOP1Inst  <"v_cvt_off_f32_i4", VOP1_F32_I32>;
+} // End ReadsModeReg = 0, mayRaiseFPException = 0
 } // End SchedRW = [WriteFloatCvt]
 
+let ReadsModeReg = 0, mayRaiseFPException = 0 in {
 defm V_CVT_F32_UBYTE0 : VOP1Inst <"v_cvt_f32_ubyte0", VOP1_F32_I32, AMDGPUcvt_f32_ubyte0>;
 defm V_CVT_F32_UBYTE1 : VOP1Inst <"v_cvt_f32_ubyte1", VOP1_F32_I32, AMDGPUcvt_f32_ubyte1>;
 defm V_CVT_F32_UBYTE2 : VOP1Inst <"v_cvt_f32_ubyte2", VOP1_F32_I32, AMDGPUcvt_f32_ubyte2>;
 defm V_CVT_F32_UBYTE3 : VOP1Inst <"v_cvt_f32_ubyte3", VOP1_F32_I32, AMDGPUcvt_f32_ubyte3>;
+} // ReadsModeReg = 0, mayRaiseFPException = 0
 
 defm V_FRACT_F32 : VOP1Inst <"v_fract_f32", VOP_F32_F32, AMDGPUfract>;
 defm V_TRUNC_F32 : VOP1Inst <"v_trunc_f32", VOP_F32_F32, ftrunc>;
@@ -417,8 +442,11 @@ let SubtargetPredicate = isGFX9Plus in {
   }
 
   defm V_SAT_PK_U8_I16    : VOP1Inst<"v_sat_pk_u8_i16", VOP_I32_I32>;
-  defm V_CVT_NORM_I16_F16 : VOP1Inst<"v_cvt_norm_i16_f16", VOP_I16_F16>;
-  defm V_CVT_NORM_U16_F16 : VOP1Inst<"v_cvt_norm_u16_f16", VOP_I16_F16>;
+
+  let mayRaiseFPException = 0 in {
+    defm V_CVT_NORM_I16_F16 : VOP1Inst<"v_cvt_norm_i16_f16", VOP_I16_F16>;
+    defm V_CVT_NORM_U16_F16 : VOP1Inst<"v_cvt_norm_u16_f16", VOP_I16_F16>;
+  } // End mayRaiseFPException = 0
 } // End SubtargetPredicate = isGFX9Plus
 
 let SubtargetPredicate = isGFX9Only in {

diff  --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index c1ce1b755322..4927c6c2f3f2 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -69,9 +69,14 @@ class VOP2_Pseudo <string opName, VOPProfile P, list<dag> pattern=[], string suf
   let mayStore = 0;
   let hasSideEffects = 0;
 
+  let ReadsModeReg = !or(isFloatType<P.DstVT>.ret, isFloatType<P.Src0VT>.ret);
+
+  // FIXME: Set this
+  // let mayRaiseFPException = ReadsModeReg;
+
   let VOP2 = 1;
   let VALU = 1;
-  let Uses = [EXEC];
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
 
   let AsmVariantName = AMDGPUAsmVariants.Default;
 }
@@ -529,8 +534,12 @@ defm V_MBCNT_LO_U32_B32 : VOP2Inst <"v_mbcnt_lo_u32_b32", VOP_NO_EXT<VOP_I32_I32
 defm V_MBCNT_HI_U32_B32 : VOP2Inst <"v_mbcnt_hi_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>, int_amdgcn_mbcnt_hi>;
 defm V_LDEXP_F32 : VOP2Inst <"v_ldexp_f32", VOP_NO_EXT<VOP_F32_F32_I32>, AMDGPUldexp>;
 defm V_CVT_PKACCUM_U8_F32 : VOP2Inst <"v_cvt_pkaccum_u8_f32", VOP_NO_EXT<VOP_I32_F32_I32>>; // TODO: set "Uses = dst"
+
+let ReadsModeReg = 0, mayRaiseFPException = 0 in {
 defm V_CVT_PKNORM_I16_F32 : VOP2Inst <"v_cvt_pknorm_i16_f32", VOP_NO_EXT<VOP_V2I16_F32_F32>, AMDGPUpknorm_i16_f32>;
 defm V_CVT_PKNORM_U16_F32 : VOP2Inst <"v_cvt_pknorm_u16_f32", VOP_NO_EXT<VOP_V2I16_F32_F32>, AMDGPUpknorm_u16_f32>;
+}
+
 defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <"v_cvt_pkrtz_f16_f32", VOP_NO_EXT<VOP_V2F16_F32_F32>, AMDGPUpkrtz_f16_f32>;
 defm V_CVT_PK_U16_U32 : VOP2Inst <"v_cvt_pk_u16_u32", VOP_NO_EXT<VOP_V2I16_I32_I32>, AMDGPUpk_u16_u32>;
 defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_NO_EXT<VOP_V2I16_I32_I32>, AMDGPUpk_i16_i32>;

diff  --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 249eb69ba4c9..7e1ac7509719 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -290,8 +290,11 @@ class VOP3_INTERP16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> {
 
 let isCommutable = 1 in {
 
+let mayRaiseFPException = 0 in {
 def V_MAD_LEGACY_F32 : VOP3Inst <"v_mad_legacy_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
 def V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
+}
+
 def V_MAD_I32_I24 : VOP3Inst <"v_mad_i32_i24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
 def V_MAD_U32_U24 : VOP3Inst <"v_mad_u32_u24", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
 def V_FMA_F32 : VOP3Inst <"v_fma_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fma>;
@@ -314,7 +317,7 @@ def V_MUL_LO_I32 : VOP3Inst <"v_mul_lo_i32", VOP3_Profile<VOP_I32_I32_I32>>;
 def V_MUL_HI_I32 : VOP3Inst <"v_mul_hi_i32", VOP3_Profile<VOP_I32_I32_I32>, mulhs>;
 } // End SchedRW = [WriteQuarterRate32]
 
-let Uses = [VCC, EXEC] in {
+let Uses = [MODE, VCC, EXEC] in {
 // v_div_fmas_f32:
 //   result = src0 * src1 + src2
 //   if (vcc)
@@ -336,15 +339,20 @@ def V_DIV_FMAS_F64 : VOP3_Pseudo <"v_div_fmas_f64", VOP_F64_F64_F64_F64_VCC, []>
 
 } // End isCommutable = 1
 
+let mayRaiseFPException = 0 in {
 def V_CUBEID_F32 : VOP3Inst <"v_cubeid_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubeid>;
 def V_CUBESC_F32 : VOP3Inst <"v_cubesc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubesc>;
 def V_CUBETC_F32 : VOP3Inst <"v_cubetc_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubetc>;
 def V_CUBEMA_F32 : VOP3Inst <"v_cubema_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, int_amdgcn_cubema>;
+} // End mayRaiseFPException
+
 def V_BFE_U32 : VOP3Inst <"v_bfe_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_u32>;
 def V_BFE_I32 : VOP3Inst <"v_bfe_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfe_i32>;
 def V_BFI_B32 : VOP3Inst <"v_bfi_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUbfi>;
 def V_ALIGNBIT_B32 : VOP3Inst <"v_alignbit_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, fshr>;
 def V_ALIGNBYTE_B32 : VOP3Inst <"v_alignbyte_b32", VOP3_Profile<VOP_I32_I32_I32_I32>, int_amdgcn_alignbyte>;
+
+let mayRaiseFPException = 0 in { // XXX - Seems suspect but manual doesn't say it does
 def V_MIN3_F32 : VOP3Inst <"v_min3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmin3>;
 def V_MIN3_I32 : VOP3Inst <"v_min3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmin3>;
 def V_MIN3_U32 : VOP3Inst <"v_min3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumin3>;
@@ -354,6 +362,8 @@ def V_MAX3_U32 : VOP3Inst <"v_max3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDG
 def V_MED3_F32 : VOP3Inst <"v_med3_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, AMDGPUfmed3>;
 def V_MED3_I32 : VOP3Inst <"v_med3_i32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUsmed3>;
 def V_MED3_U32 : VOP3Inst <"v_med3_u32", VOP3_Profile<VOP_I32_I32_I32_I32>, AMDGPUumed3>;
+} // End mayRaiseFPException = 0
+
 def V_SAD_U8 : VOP3Inst <"v_sad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
 def V_SAD_HI_U8 : VOP3Inst <"v_sad_hi_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
 def V_SAD_U16 : VOP3Inst <"v_sad_u16", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
@@ -366,6 +376,8 @@ def V_DIV_FIXUP_F64 : VOP3Inst <"v_div_fixup_f64", VOP3_Profile<VOP_F64_F64_F64_
 def V_LDEXP_F64 : VOP3Inst <"v_ldexp_f64", VOP3_Profile<VOP_F64_F64_I32>, AMDGPUldexp, 1>;
 } // End SchedRW = [WriteDoubleAdd], FPDPRounding = 1
 
+
+let mayRaiseFPException = 0 in { // Seems suspicious but manual doesn't say it does.
 def V_DIV_SCALE_F32 : VOP3_Pseudo <"v_div_scale_f32", VOP3b_F32_I1_F32_F32_F32, [], 1> {
   let SchedRW = [WriteFloatFMA, WriteSALU];
   let AsmMatchConverter = "";
@@ -377,6 +389,7 @@ def V_DIV_SCALE_F64 : VOP3_Pseudo <"v_div_scale_f64", VOP3b_F64_I1_F64_F64_F64,
   let AsmMatchConverter = "";
   let FPDPRounding = 1;
 }
+} // End mayRaiseFPException = 0
 
 def V_MSAD_U8 : VOP3Inst <"v_msad_u8", VOP3_Profile<VOP_I32_I32_I32_I32, VOP3_CLAMP>>;
 
@@ -471,7 +484,7 @@ def V_MAD_U16 : VOP3Inst <"v_mad_u16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CL
 def V_MAD_I16 : VOP3Inst <"v_mad_i16", VOP3_Profile<VOP_I16_I16_I16_I16, VOP3_CLAMP>>;
 let FPDPRounding = 1 in {
 def V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>;
-let Uses = [M0, EXEC] in {
+let Uses = [MODE, M0, EXEC] in {
// For some reason the intrinsic operands are in a different order
 // from the instruction operands.
 def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i32, f32]>,
@@ -482,7 +495,7 @@ def V_INTERP_P2_F16 : VOP3Interp <"v_interp_p2_f16", VOP3_INTERP16<[f16, f32, i3
                                    (i32 timm:$attr),
                                    (i1 timm:$high),
                                    M0))]>;
-} // End Uses = [M0, EXEC]
+} // End Uses = [M0, MODE, EXEC]
 } // End FPDPRounding = 1
 } // End renamedInGFX9 = 1
 
@@ -498,7 +511,7 @@ def V_MAD_I16_gfx9   : VOP3Inst <"v_mad_i16_gfx9", VOP3_Profile<VOP_I16_I16_I16_
 def V_INTERP_P2_F16_gfx9 : VOP3Interp <"v_interp_p2_f16_gfx9", VOP3_INTERP16<[f16, f32, i32, f32]>>;
 } // End SubtargetPredicate = isGFX9Plus
 
-let Uses = [M0, EXEC], FPDPRounding = 1 in {
+let Uses = [MODE, M0, EXEC], FPDPRounding = 1 in {
 def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32, i32, untyped]>,
        [(set f32:$vdst, (int_amdgcn_interp_p1_f16 (VOP3Mods f32:$src0, i32:$src0_modifiers),
                                                   (i32 timm:$attrchan),
@@ -512,15 +525,15 @@ def V_INTERP_P1LL_F16 : VOP3Interp <"v_interp_p1ll_f16", VOP3_INTERP16<[f32, f32
 
 
 def V_INTERP_P1LV_F16 : VOP3Interp <"v_interp_p1lv_f16", VOP3_INTERP16<[f32, f32, i32, f16]>>;
-} // End Uses = [M0, EXEC], FPDPRounding = 1
+} // End Uses = [MODE, M0, EXEC], FPDPRounding = 1
 
 } // End SubtargetPredicate = Has16BitInsts, isCommutable = 1
 
-let SubtargetPredicate = isGFX8Plus, Uses = [M0, EXEC] in {
+let SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC] in {
 def V_INTERP_P1_F32_e64  : VOP3Interp <"v_interp_p1_f32", VOP3_INTERP>;
 def V_INTERP_P2_F32_e64  : VOP3Interp <"v_interp_p2_f32", VOP3_INTERP>;
 def V_INTERP_MOV_F32_e64 : VOP3Interp <"v_interp_mov_f32", VOP3_INTERP_MOV>;
-} // End SubtargetPredicate = isGFX8Plus, Uses = [M0, EXEC]
+} // End SubtargetPredicate = isGFX8Plus, Uses = [MODE, M0, EXEC]
 
 let Predicates = [Has16BitInsts, isGFX6GFX7GFX8GFX9] in {
 

diff  --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 25075e179847..a3d973fc2f18 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -149,10 +149,11 @@ multiclass MadFmaMixPats<SDPatternOperator fma_like,
 }
 
 let SubtargetPredicate = HasMadMixInsts in {
+
 // These are VOP3a-like opcodes which accept no omod.
 // Size of src arguments (16/32) is controlled by op_sel.
 // For 16-bit src arguments their location (hi/lo) are controlled by op_sel_hi.
-let isCommutable = 1 in {
+let isCommutable = 1, mayRaiseFPException = 0 in {
 def V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;
 
 let FPDPRounding = 1 in {
@@ -370,7 +371,8 @@ def V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite> {
   let isMoveImm = 1;
 }
 
-let isConvergent = 1 in {
+// FP32 denorm mode is respected, rounding mode is not. Exceptions are not supported.
+let isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1 in {
 def V_MFMA_F32_4X4X1F32    : VOP3Inst<"v_mfma_f32_4x4x1f32",    VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_4x4x1f32>;
 def V_MFMA_F32_4X4X4F16    : VOP3Inst<"v_mfma_f32_4x4x4f16",    VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_4x4x4f16>;
 def V_MFMA_I32_4X4X4I8     : VOP3Inst<"v_mfma_i32_4x4x4i8",     VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_4x4x4i8>;
@@ -391,7 +393,7 @@ def V_MFMA_I32_32X32X4I8   : VOP3Inst<"v_mfma_i32_32x32x4i8",   VOPProfileMAI_I3
 def V_MFMA_I32_32X32X8I8   : VOP3Inst<"v_mfma_i32_32x32x8i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_32x32x8i8>;
 def V_MFMA_F32_32X32X2BF16 : VOP3Inst<"v_mfma_f32_32x32x2bf16", VOPProfileMAI_F32_V2I16_X32, int_amdgcn_mfma_f32_32x32x2bf16>;
 def V_MFMA_F32_32X32X4BF16 : VOP3Inst<"v_mfma_f32_32x32x4bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_32x32x4bf16>;
-} // End isConvergent = 1
+} // End isConvergent = 1, mayRaiseFPException = 0, ReadsModeReg = 1
 
 } // End SubtargetPredicate = HasMAIInsts
 

diff  --git a/llvm/lib/Target/AMDGPU/VOPCInstructions.td b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
index 003a4f73c156..aa2fa260e7b5 100644
--- a/llvm/lib/Target/AMDGPU/VOPCInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPCInstructions.td
@@ -92,9 +92,11 @@ class VOPC_Pseudo <string opName, VOPC_Profile P, list<dag> pattern=[],
   let mayStore = 0;
   let hasSideEffects = 0;
 
+  let ReadsModeReg = isFloatType<P.Src0VT>.ret;
+
   let VALU = 1;
   let VOPC = 1;
-  let Uses = [EXEC];
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
   let Defs = !if(DefVcc, [VCC], []);
 
   VOPProfile Pfl = P;
@@ -738,6 +740,9 @@ multiclass VOPC_CLASS_F64 <string opName> :
 multiclass VOPCX_CLASS_F64 <string opName> :
   VOPCX_Class_Pseudos <opName, VOPC_I1_F64_I32, VOPC_F64_I32>;
 
+// cmp_class ignores the FP mode and faithfully reports the unmodified
+// source value.
+let ReadsModeReg = 0, mayRaiseFPException = 0 in {
 defm V_CMP_CLASS_F32 : VOPC_CLASS_F32 <"v_cmp_class_f32">;
 defm V_CMPX_CLASS_F32 : VOPCX_CLASS_F32 <"v_cmpx_class_f32">;
 defm V_CMP_CLASS_F64 : VOPC_CLASS_F64 <"v_cmp_class_f64">;
@@ -747,6 +752,7 @@ let SubtargetPredicate = Has16BitInsts in {
 defm V_CMP_CLASS_F16  : VOPC_CLASS_F16 <"v_cmp_class_f16">;
 defm V_CMPX_CLASS_F16 : VOPCX_CLASS_F16 <"v_cmpx_class_f16">;
 }
+} // End ReadsModeReg = 0, mayRaiseFPException = 0
 
 //===----------------------------------------------------------------------===//
 // V_ICMPIntrinsic Pattern.

diff  --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 42a275c6c365..d52ad7f92997 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -8,6 +8,8 @@
 
 // dummies for outer let
 class LetDummies {
+  bit ReadsModeReg;
+  bit mayRaiseFPException;
   bit isCommutable;
   bit isConvertibleToThreeAddress;
   bit isMoveImm;
@@ -35,7 +37,7 @@ class VOPAnyCommon <dag outs, dag ins, string asm, list<dag> pattern> :
   let hasSideEffects = 0;
   let UseNamedOperandTable = 1;
   let VALU = 1;
-  let Uses = [EXEC];
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
 }
 
 class VOP_Pseudo <string opName, string suffix, VOPProfile P, dag outs, dag ins,
@@ -118,7 +120,12 @@ class VOP3_Pseudo <string opName, VOPProfile P, list<dag> pattern = [],
   let ClampLo = P.HasClampLo;
   let ClampHi = P.HasClampHi;
 
-  let Uses = [EXEC];
+  let ReadsModeReg = !or(isFloatType<P.DstVT>.ret, isFloatType<P.Src0VT>.ret);
+
+  // FIXME: Set this. Right now it seems regular IR operations don't
+  // automatically imply no FP exceptions.
+  // let mayRaiseFPException = ReadsModeReg;
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
 
   let AsmVariantName = AMDGPUAsmVariants.VOP3;
   let AsmMatchConverter =
@@ -490,7 +497,13 @@ class VOP_SDWA_Pseudo <string opName, VOPProfile P, list<dag> pattern=[]> :
 
   let VALU = 1;
   let SDWA = 1;
-  let Uses = [EXEC];
+
+  let ReadsModeReg = !or(isFloatType<P.DstVT>.ret, isFloatType<P.Src0VT>.ret);
+
+  // FIXME: Set this. Right now it seems regular IR operations don't
+  // automatically imply no FP exceptions.
+  // let mayRaiseFPException = ReadsModeReg;
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
 
   let SubtargetPredicate = HasSDWA;
   let AssemblerPredicate = HasSDWA;
@@ -607,7 +620,13 @@ class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
   let VALU = 1;
   let DPP = 1;
   let Size = 8;
-  let Uses = [EXEC];
+
+  let ReadsModeReg = !or(isFloatType<P.DstVT>.ret, isFloatType<P.Src0VT>.ret);
+
+  // FIXME: Set this. Right now it seems regular IR operations don't
+  // automatically imply no FP exceptions.
+  // let mayRaiseFPException = ReadsModeReg;
+  let Uses = !if(ReadsModeReg, [MODE, EXEC], [EXEC]);
   let isConvergent = 1;
 
   string Mnemonic = OpName;

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
index 29e59cd4b294..d010f7f44160 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
@@ -15,7 +15,7 @@ body: |
     ; CHECK-LABEL: name: cos_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_COS_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %0
@@ -35,7 +35,7 @@ body: |
     ; CHECK-LABEL: name: cos_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_COS_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
index a18242c3e448..86b782b27e82 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
@@ -17,7 +17,7 @@ body: |
     ; CHECK-LABEL: name: cos_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_COS_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,7 +38,7 @@ body: |
     ; CHECK-LABEL: name: cos_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_COS_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
index bc987b070375..2eef865fc85b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
@@ -15,7 +15,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -37,7 +37,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -58,7 +58,7 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmad.ftz.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmad.ftz.mir
index b9f88557678a..fe51eb9a3938 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmad.ftz.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmad.ftz.mir
@@ -19,7 +19,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -43,7 +43,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -67,7 +67,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -92,7 +92,7 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]]
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY3]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -117,7 +117,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -140,7 +140,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -163,7 +163,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -185,7 +185,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
-    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %0, %0
@@ -226,7 +226,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir
index 92f264f6b7ea..13d268d68e94 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.mir
@@ -16,7 +16,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -40,7 +40,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -64,7 +64,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -88,7 +88,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -113,7 +113,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -135,7 +135,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -157,7 +157,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY1]], 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY1]], 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -178,7 +178,7 @@ body: |
     ; GCN-LABEL: name: fmed3_s32_vsss
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F32_:%[0-9]+]]:vgpr_32 = V_MED3_F32 0, [[COPY]], 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmed3), %0, %0, %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
index c47565181cfc..7ffb25190770 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
@@ -21,7 +21,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F16_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -48,7 +48,7 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MED3_F16_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
index f9e9978e9cad..8d62c2c49049 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
@@ -15,7 +15,7 @@ body: |
     ; CHECK-LABEL: name: fract_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
@@ -35,7 +35,7 @@ body: |
     ; CHECK-LABEL: name: fract_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
@@ -55,7 +55,7 @@ body: |
     ; CHECK-LABEL: name: fract_s64_vs
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F64_e64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
@@ -75,7 +75,7 @@ body: |
     ; CHECK-LABEL: name: fract_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
index ecae749c163b..8360aee9a83e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
@@ -5,6 +5,8 @@
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %1:sgpr(s16) (in function: fract_s16_vs)
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %1:vgpr(s16) (in function: fract_s16_vv)
 
+---
+
 name: fract_s16_vs
 legalized: true
 regBankSelected: true
@@ -17,7 +19,7 @@ body: |
     ; CHECK-LABEL: name: fract_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,7 +40,7 @@ body: |
     ; CHECK-LABEL: name: fract_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
index 7371dbb998ff..0fc2582f983f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
@@ -14,7 +14,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -35,7 +35,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -56,7 +56,7 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -77,7 +77,7 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
@@ -98,7 +98,7 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
@@ -119,7 +119,7 @@ body: |
     ; GCN: liveins: $vgpr0_vgpr1, $vgpr2
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
index ad63aa85e9a0..1a620849f336 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
@@ -19,7 +19,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -41,7 +41,7 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -63,7 +63,7 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
index def9d91830b8..3dbed8a7b5dd 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
@@ -20,7 +20,7 @@ body: |
     ; CHECK-LABEL: name: rcp_legacy_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_LEGACY_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp.legacy), %0
@@ -40,7 +40,7 @@ body: |
     ; CHECK-LABEL: name: rcp_legacy_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_LEGACY_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp.legacy), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
index 6dfdec779273..fce886429408 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
@@ -15,7 +15,7 @@ body: |
     ; CHECK-LABEL: name: rcp_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
@@ -35,7 +35,7 @@ body: |
     ; CHECK-LABEL: name: rcp_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
@@ -55,7 +55,7 @@ body: |
     ; CHECK-LABEL: name: rcp_s64_vs
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F64_e64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
@@ -75,7 +75,7 @@ body: |
     ; CHECK-LABEL: name: rcp_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
index 90cf12ee37e7..c69890ae5c85 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
@@ -5,6 +5,7 @@
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %1:sgpr(s16) (in function: rcp_s16_vs)
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %1:vgpr(s16) (in function: rcp_s16_vv)
 
+---
 name: rcp_s16_vs
 legalized: true
 regBankSelected: true
@@ -17,7 +18,7 @@ body: |
     ; CHECK-LABEL: name: rcp_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,7 +39,7 @@ body: |
     ; CHECK-LABEL: name: rcp_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
index 6e514d217b56..4b78bf341b52 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
@@ -20,7 +20,7 @@ body: |
     ; CHECK-LABEL: name: rsq_clamp_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_CLAMP_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), %0
@@ -40,7 +40,7 @@ body: |
     ; CHECK-LABEL: name: rsq_clamp_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_CLAMP_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
index 0df3078a0e70..2c129b08c651 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
@@ -20,7 +20,7 @@ body: |
     ; CHECK-LABEL: name: rsq_legacy_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_LEGACY_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.legacy), %0
@@ -40,7 +40,7 @@ body: |
     ; CHECK-LABEL: name: rsq_legacy_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_LEGACY_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.legacy), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
index 65fcb5deb44f..4cf3fc5a9b42 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
@@ -15,7 +15,7 @@ body: |
     ; CHECK-LABEL: name: rsq_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
@@ -35,7 +35,7 @@ body: |
     ; CHECK-LABEL: name: rsq_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
@@ -55,7 +55,7 @@ body: |
     ; CHECK-LABEL: name: rsq_s64_vs
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F64_e64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
@@ -75,7 +75,7 @@ body: |
     ; CHECK-LABEL: name: rsq_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
index be12c84cf75e..8620efd9fdce 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
@@ -17,7 +17,7 @@ body: |
     ; CHECK-LABEL: name: rsq_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,7 +38,7 @@ body: |
     ; CHECK-LABEL: name: rsq_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
index b069bc7ead29..90e586c6888b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
@@ -15,7 +15,7 @@ body: |
     ; CHECK-LABEL: name: sin_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %0
@@ -35,7 +35,7 @@ body: |
     ; CHECK-LABEL: name: sin_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
index ff049d1be98f..903ee4a9a040 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
@@ -17,7 +17,7 @@ body: |
     ; CHECK-LABEL: name: sin_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,7 +38,7 @@ body: |
     ; CHECK-LABEL: name: sin_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
index c94e0665da35..ed510864f3bb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -36,7 +36,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vsv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -59,7 +59,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -82,7 +82,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fabs_lhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -106,7 +106,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fabs_rhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 2, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 2, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -130,7 +130,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fneg_fabs_lhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -155,7 +155,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fneg_fabs_rhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -180,7 +180,7 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_fneg_copy_sgpr
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $exec
+    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
index 064e06a684c3..65482f3cee96 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvv
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -34,7 +34,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vsv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -55,7 +55,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -76,7 +76,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvv_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -97,7 +97,7 @@ body: |
     liveins: $vgpr0, $vgpr1
     ; GFX6-LABEL: name: fadd_s32_vvv_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -119,7 +119,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvv_fneg_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -141,7 +141,7 @@ body: |
     liveins: $vgpr0, $vgpr1
     ; GFX6-LABEL: name: fadd_s32_vvv_fneg_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -166,7 +166,7 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -191,7 +191,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_copy_fneg_copy_fabs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
@@ -222,7 +222,7 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -249,7 +249,7 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
@@ -276,7 +276,7 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
index 0525e5ecc15c..b4b9e2ce1385 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -34,7 +34,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vsv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = COPY $vgpr0_vgpr1
@@ -55,7 +55,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -76,7 +76,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvv_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -97,7 +97,7 @@ body: |
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
     ; GFX6-LABEL: name: fadd_s64_vvv_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -119,7 +119,7 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvv_fneg_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -141,7 +141,7 @@ body: |
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
     ; GFX6-LABEL: name: fadd_s64_vvv_fneg_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr0_vgpr1
@@ -167,7 +167,7 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s64) = COPY $sgpr0_sgpr1
@@ -196,7 +196,7 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
@@ -223,7 +223,7 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
@@ -250,7 +250,7 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $exec
+    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
index 75086984a142..7bf63ebfa1fa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
@@ -17,11 +17,11 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_f16_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_f16_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -44,11 +44,11 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_f16_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, 15360, 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, 15360, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F16_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_f16_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -72,11 +72,11 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f32_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FCANONICALIZE %0
@@ -99,11 +99,11 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f32_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FCANONICALIZE %0
@@ -126,11 +126,11 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_v2f16_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX8: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
     ; GFX9-LABEL: name: fcanonicalize_v2f16_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = G_FCANONICALIZE %0
@@ -153,11 +153,11 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_v2f16_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 0, 15360, 8, [[COPY]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX8: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 0, 15360, 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
     ; GFX9-LABEL: name: fcanonicalize_v2f16_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = G_FCANONICALIZE %0
@@ -180,11 +180,11 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f64_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX8: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MAX_F64_]]
     ; GFX9-LABEL: name: fcanonicalize_f64_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX9: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FCANONICALIZE %0
@@ -207,11 +207,11 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f64_flush
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX8: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, 4607182418800017408, 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, 4607182418800017408, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F64_]]
     ; GFX9-LABEL: name: fcanonicalize_f64_flush
     ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX9: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FCANONICALIZE %0
@@ -233,11 +233,11 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fabs_f32_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_fabs_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FABS %0
@@ -261,11 +261,11 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fabs_f32_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_fabs_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FABS %0
@@ -288,11 +288,11 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fneg_f32_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_fneg_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
@@ -315,11 +315,11 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fneg_f32_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_fneg_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
@@ -344,13 +344,13 @@ body: |
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX8: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_fneg_fabs_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX9: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
@@ -376,13 +376,13 @@ body: |
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX8: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $exec
+    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
     ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
     ; GFX9-LABEL: name: fcanonicalize_fneg_fabs_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX9: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $exec
+    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
index 70c5b76d758f..fdf6dcfb8d10 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: fceil_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_CEIL_F32_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_CEIL_F32_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_CEIL_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FCEIL %0
@@ -34,7 +34,7 @@ body: |
     ; CHECK-LABEL: name: fceil_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_CEIL_F32_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_CEIL_F32_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_CEIL_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FCEIL %0
@@ -54,7 +54,7 @@ body: |
     ; CHECK-LABEL: name: fceil_s64_sv
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0_vgpr1 = COPY [[V_CEIL_F64_e64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_FCEIL %0
@@ -74,7 +74,7 @@ body: |
     ; CHECK-LABEL: name: fceil_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0_vgpr1 = COPY [[V_CEIL_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FCEIL %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
index 92b615e8cf6e..75a78190e62e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
@@ -38,7 +38,7 @@ body: |
     ; GCN-LABEL: name: fceil_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CEIL_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -60,7 +60,7 @@ body: |
     ; GCN-LABEL: name: fceil_s16_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CEIL_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -82,7 +82,7 @@ body: |
     ; GCN-LABEL: name: fceil_fneg_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CEIL_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
index 74f9154bd966..c052f484bff3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
@@ -37,13 +37,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_EQ_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_oeq_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -62,13 +62,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ogt_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GT_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ogt_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -87,13 +87,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oge_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GE_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_oge_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -112,13 +112,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_olt_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LT_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_olt_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -137,13 +137,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ole_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LE_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ole_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -162,13 +162,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_one_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_one_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -187,13 +187,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ord_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_O_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ord_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_O_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -212,13 +212,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uno_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_U_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_uno_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -237,13 +237,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ueq_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLG_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ueq_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -262,13 +262,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ugt_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLE_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ugt_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -287,13 +287,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uge_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLT_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_uge_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -312,13 +312,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ult_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGE_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ult_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -337,13 +337,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ule_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGT_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_ule_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -362,13 +362,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_une_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NEQ_F32_e64_]]
     ; WAVE32-LABEL: name: fcmp_une_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -435,13 +435,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_EQ_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_oeq_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -460,13 +460,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ogt_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GT_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ogt_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -485,13 +485,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oge_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GE_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_oge_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -510,13 +510,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_olt_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LT_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_olt_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -535,13 +535,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ole_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LE_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ole_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -560,13 +560,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_one_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_one_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -585,13 +585,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ord_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_O_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ord_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_O_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -610,13 +610,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uno_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_U_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_uno_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -635,13 +635,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ueq_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLG_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ueq_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -660,13 +660,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ugt_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLE_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ugt_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -685,13 +685,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uge_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLT_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_uge_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -710,13 +710,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ult_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGE_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ult_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -735,13 +735,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ule_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGT_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_ule_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -760,13 +760,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_une_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NEQ_F64_e64_]]
     ; WAVE32-LABEL: name: fcmp_une_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -809,14 +809,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s32_vv_select_user
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY1]], 0, [[COPY]], [[V_CMP_EQ_F32_e64_]], implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CNDMASK_B32_e64_]]
     ; WAVE32-LABEL: name: fcmp_oeq_s32_vv_select_user
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY1]], 0, [[COPY]], [[V_CMP_EQ_F32_e64_]], implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CNDMASK_B32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
index 42b017b409a5..a0354d840393 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
@@ -43,13 +43,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_EQ_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_oeq_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -70,13 +70,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ogt_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GT_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ogt_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -97,13 +97,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oge_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GE_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_oge_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -124,13 +124,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_olt_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LT_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_olt_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -151,13 +151,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ole_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LE_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ole_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -177,13 +177,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_one_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_one_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -204,13 +204,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ord_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ord_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -231,13 +231,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uno_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_U_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_uno_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -258,13 +258,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ueq_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLG_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ueq_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -285,13 +285,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ugt_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLE_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ugt_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -312,13 +312,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uge_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLT_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_uge_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -339,13 +339,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ult_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGE_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ult_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -366,13 +366,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ule_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGT_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_ule_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -393,13 +393,13 @@ body: |
     ; WAVE64-LABEL: name: fcmp_une_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE64: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NEQ_F16_e64_]]
     ; WAVE32-LABEL: name: fcmp_une_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $exec
+    ; WAVE32: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
index bb2ba24d411c..a0339fa9551e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: fexp2_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_EXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_EXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_EXP_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FEXP2 %0
@@ -34,7 +34,7 @@ body: |
     ; CHECK-LABEL: name: fexp2_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_EXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_EXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_EXP_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FEXP2 %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
index fa462ac93b06..68bde4c25b64 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
@@ -38,7 +38,7 @@ body: |
     ; VI-LABEL: name: ffloor_s16_vv
     ; VI: liveins: $vgpr0
     ; VI: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -60,7 +60,7 @@ body: |
     ; VI-LABEL: name: ffloor_s16_vs
     ; VI: liveins: $sgpr0
     ; VI: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -90,7 +90,7 @@ body: |
     ; VI-LABEL: name: ffloor_fneg_s16_vv
     ; VI: liveins: $vgpr0
     ; VI: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
index 611eab6bfa69..710a7927acd2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: ffloor_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FFLOOR %0
@@ -34,7 +34,7 @@ body: |
     ; CHECK-LABEL: name: ffloor_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FFLOOR %0
@@ -54,7 +54,7 @@ body: |
     ; CHECK-LABEL: name: ffloor_fneg_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = G_FNEG %0
@@ -74,7 +74,7 @@ body: |
     ; CHECK-LABEL: name: ffloor_fneg_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
index 1af481c27a97..276a1ffb9930 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: ffloor_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = V_FLOOR_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = V_FLOOR_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0_vgpr1 = COPY [[V_FLOOR_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FFLOOR %0
@@ -50,7 +50,7 @@ body: |
     ; CHECK-LABEL: name: ffloor_fneg_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = V_FLOOR_F64_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = V_FLOOR_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0_vgpr1 = COPY [[V_FLOOR_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FNEG %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
index c812fc48f135..2034eb73fdf0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
@@ -17,20 +17,20 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX9-DL-LABEL: name: fma_f32
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
     ; GFX10-LABEL: name: fma_f32
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -54,20 +54,20 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX9-DL-LABEL: name: fma_f32_fneg_src0
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
     ; GFX10-LABEL: name: fma_f32_fneg_src0
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -92,20 +92,20 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX9-DL-LABEL: name: fma_f32_fneg_src1
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
     ; GFX10-LABEL: name: fma_f32_fneg_src1
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -130,20 +130,20 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX9-DL-LABEL: name: fma_f32_fneg_src2
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX10-LABEL: name: fma_f32_fneg_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -168,20 +168,20 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX9-DL-LABEL: name: fma_f32_fabs_src2
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $exec
+    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX10-LABEL: name: fma_f32_fabs_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -206,20 +206,20 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX9-DL-LABEL: name: fma_f32_copy_fneg_src2
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     ; GFX10-LABEL: name: fma_f32_copy_fneg_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_FMA_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmad.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmad.s32.mir
index 019bcd5cf2f3..99e776b5d0ff 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmad.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmad.s32.mir
@@ -16,14 +16,14 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     ; GFX10-LABEL: name: fmad_f32
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_MAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_MAC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -47,14 +47,14 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     ; GFX10-LABEL: name: fmad_f32_fneg_src0
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -79,14 +79,14 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     ; GFX10-LABEL: name: fmad_f32_fneg_src1
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -111,14 +111,14 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     ; GFX10-LABEL: name: fmad_f32_fneg_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -143,14 +143,14 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     ; GFX10-LABEL: name: fmad_f32_fabs_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -175,14 +175,14 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX6: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX6: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     ; GFX10-LABEL: name: fmad_f32_copy_fneg_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $exec
+    ; GFX10: [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX10: S_ENDPGM 0, implicit [[V_MAD_F32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
index 636b1d2dda69..720f9285961a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
@@ -21,15 +21,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -89,15 +89,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
index 32ef48fcf4da..e94ab1c3cdc5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_ieee_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -36,7 +36,7 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_ieee_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
index 3028a1f1493f..bc2e53d421c2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX9-LABEL: name: fmaxnum_ieee_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
index 020e171d3fd5..a440e801682f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
@@ -22,15 +22,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -88,15 +88,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
index e1caa4cce7e8..1bf0c576adfe 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -36,7 +36,7 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
index 0b3b1a9ff9d6..bc83f90c8a11 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
@@ -14,7 +14,7 @@ body: |
     ; GFX9-LABEL: name: fmaxnum_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
index d6ac32e41543..40b97460b203 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
@@ -21,15 +21,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -89,15 +89,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
index 432243ec9c9c..cf00b1b1d80a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: fminnum_ieee_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -36,7 +36,7 @@ body: |
     ; CHECK-LABEL: name: fminnum_ieee_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
index 13853bf90c5c..0bb68ef86ed9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX9-LABEL: name: fminnum_ieee_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MIN_F16_:%[0-9]+]]:vgpr_32 = V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MIN_F16_:%[0-9]+]]:vgpr_32 = V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MIN_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
index 1f4decb7826a..74350b247fc4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
@@ -22,15 +22,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -88,15 +88,15 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $exec
+    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
     ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
index 71d7b6e8c6df..0a4f65544a46 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: fminnum_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
@@ -36,7 +36,7 @@ body: |
     ; CHECK-LABEL: name: fminnum_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $exec
+    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
index 84afe51ca3cf..255d05d39f00 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX9-LABEL: name: fminnum_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MIN_F16_:%[0-9]+]]:vgpr_32 = V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MIN_F16_:%[0-9]+]]:vgpr_32 = V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MIN_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
index c7dbeada2ca4..babbe653b980 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
@@ -15,9 +15,9 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
-    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
@@ -53,9 +53,9 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GCN: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F64_1:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F64_2:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
+    ; GCN: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F64_1:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F64_2:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MUL_F64_]], implicit [[V_MUL_F64_1]], implicit [[V_MUL_F64_2]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = COPY $vgpr0_vgpr1
@@ -86,9 +86,9 @@ body: |
     ; GCN-LABEL: name: fmul_f16
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F16_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F16_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: S_ENDPGM 0, implicit [[V_MUL_F16_e64_]], implicit [[V_MUL_F16_e64_1]], implicit [[V_MUL_F16_e64_2]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
@@ -123,16 +123,16 @@ body: |
     ; GCN-LABEL: name: fmul_modifiers_f32
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_3:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_4:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_5:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_6:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_7:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_8:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_9:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_3:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_4:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_5:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_6:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_7:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_8:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_MUL_F32_e64_9:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
index 665a3589831d..b70c8e25ccd1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
@@ -13,7 +13,7 @@ body: |
     ; GFX9-LABEL: name: fmul_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
@@ -33,7 +33,7 @@ body: |
     ; GFX9-LABEL: name: fmul_v2f16_fneg_v_fneg_v
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 11, [[COPY]], 11, [[COPY1]], 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 11, [[COPY]], 11, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
@@ -60,7 +60,7 @@ body: |
     ; GFX9: [[FNEG:%[0-9]+]]:vgpr(s16) = G_FNEG [[TRUNC]]
     ; GFX9: [[ANYEXT:%[0-9]+]]:vgpr(s32) = G_ANYEXT [[FNEG]](s16)
     ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:vgpr_32(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[COPY2]](s32)
-    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32(<2 x s16>) = V_PK_MUL_F16 8, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 8, [[COPY]](<2 x s16>), 0, 0, 0, 0, 0, implicit $exec
+    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32(<2 x s16>) = V_PK_MUL_F16 8, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 8, [[COPY]](<2 x s16>), 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     ; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]](<2 x s16>)
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
index e1e8c0e250be..64662d748cd1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
@@ -14,7 +14,7 @@ body: |
     ; GCN-LABEL: name: fptosi_s32_to_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FPTOSI %0
@@ -34,7 +34,7 @@ body: |
     ; GCN-LABEL: name: fptosi_s32_to_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FPTOSI %0
@@ -54,7 +54,7 @@ body: |
     ; GCN-LABEL: name: fptosi_s32_to_s32_fneg_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
@@ -75,8 +75,8 @@ body: |
     ; GCN-LABEL: name: fptosi_s16_to_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
+    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -97,8 +97,8 @@ body: |
     ; GCN-LABEL: name: fptosi_s16_to_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
+    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -121,8 +121,8 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
     ; GCN: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $exec
-    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
+    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
index e6736f2d7147..a13620ad9452 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
@@ -15,8 +15,8 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
-    ; GCN: [[V_CVT_U32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY1]], 0, 0, implicit $exec
+    ; GCN: [[V_CVT_U32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_U32_F32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; GCN: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_U32_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; GCN: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_U32_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     %0:sgpr(s32) = COPY $sgpr0
@@ -48,8 +48,8 @@ body: |
     ; GCN-LABEL: name: fptoui_s16_to_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
+    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -70,8 +70,8 @@ body: |
     ; GCN-LABEL: name: fptoui_s16_to_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
+    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -94,8 +94,8 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
     ; GCN: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $exec
+    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
+    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
index 45a8551ee47e..316046edaad4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
@@ -15,7 +15,7 @@ body: |
     ; GCN-LABEL: name: frint_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FRINT %0
@@ -35,7 +35,7 @@ body: |
     ; GCN-LABEL: name: frint_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FRINT %0
@@ -55,7 +55,7 @@ body: |
     ; GCN-LABEL: name: frint_fneg_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
@@ -76,7 +76,7 @@ body: |
     ; GCN-LABEL: name: frint_s64_vv
     ; GCN: liveins: $vgpr0_vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0_vgpr1 = COPY [[V_RNDNE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FRINT %0
@@ -96,7 +96,7 @@ body: |
     ; GCN-LABEL: name: frint_s64_fneg_vv
     ; GCN: liveins: $vgpr0_vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0_vgpr1 = COPY [[V_RNDNE_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FNEG %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
index c72ea740a398..e449a7b93baa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
@@ -38,7 +38,7 @@ body: |
     ; GCN-LABEL: name: frint_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -60,7 +60,7 @@ body: |
     ; GCN-LABEL: name: frint_s16_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -82,7 +82,7 @@ body: |
     ; GCN-LABEL: name: frint_fneg_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
index 550f47c5471a..a9cd8c51f62a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
@@ -14,7 +14,7 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_TRUNC_F32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC_TRUNC %0
@@ -34,7 +34,7 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0 = COPY [[V_TRUNC_F32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC_TRUNC %0
@@ -54,7 +54,7 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s64_sv
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC_TRUNC %0
@@ -74,7 +74,7 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
index 1bf97cac9602..d2fb035c8b6b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
@@ -14,7 +14,7 @@ body: |
     ; GCN-LABEL: name: intrinsic_trunc_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
@@ -36,7 +36,7 @@ body: |
     ; GCN-LABEL: name: intrinsic_trunc_s16_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -58,7 +58,7 @@ body: |
     ; GCN-LABEL: name: intrinsic_trunc_fneg_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 1, [[COPY]], 0, 0, implicit $exec
+    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
index e68fda19d493..3cd2362b1093 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
@@ -16,8 +16,8 @@ body: |
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
-    ; WAVE64: [[V_CVT_F32_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY]], 0, 0, implicit $exec
-    ; WAVE64: [[V_CVT_F32_I32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY1]], 0, 0, implicit $exec
+    ; WAVE64: [[V_CVT_F32_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; WAVE64: [[V_CVT_F32_I32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; WAVE64: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_F32_I32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; WAVE64: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_F32_I32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     ; WAVE32-LABEL: name: sitofp
@@ -25,8 +25,8 @@ body: |
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
-    ; WAVE32: [[V_CVT_F32_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY]], 0, 0, implicit $exec
-    ; WAVE32: [[V_CVT_F32_I32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY1]], 0, 0, implicit $exec
+    ; WAVE32: [[V_CVT_F32_I32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; WAVE32: [[V_CVT_F32_I32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e64 [[COPY1]], 0, 0, implicit $mode, implicit $exec
     ; WAVE32: GLOBAL_STORE_DWORD [[COPY2]], [[V_CVT_F32_I32_e64_]], 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
     ; WAVE32: GLOBAL_STORE_DWORD [[COPY2]], [[V_CVT_F32_I32_e64_1]], 0, 0, 0, 0, implicit $exec :: (store 4, addrspace 1)
     %0:sgpr(s32) = COPY $sgpr0
@@ -58,15 +58,15 @@ body: |
     ; WAVE64-LABEL: name: sitofp_s32_to_s16_vv
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $exec
+    ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
     ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     ; WAVE32-LABEL: name: sitofp_s32_to_s16_vv
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $exec
+    ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
     ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_SITOFP %0
@@ -87,15 +87,15 @@ body: |
     ; WAVE64-LABEL: name: sitofp_s32_to_s16_vs
     ; WAVE64: liveins: $sgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $exec
+    ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
     ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     ; WAVE32-LABEL: name: sitofp_s32_to_s16_vs
     ; WAVE32: liveins: $sgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $exec
+    ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
     ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s16) = G_SITOFP %0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
index d35f7c428a47..421b987f8f92 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
@@ -15,13 +15,13 @@ body: |
     ; WAVE64-LABEL: name: uitofp_s32_to_s32_vv
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE64: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $exec
+    ; WAVE64: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; WAVE64: $vgpr0 = COPY [[V_CVT_F32_U32_e64_]]
     ; WAVE32-LABEL: name: uitofp_s32_to_s32_vv
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE32: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $exec
+    ; WAVE32: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; WAVE32: $vgpr0 = COPY [[V_CVT_F32_U32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_UITOFP %0
@@ -41,13 +41,13 @@ body: |
     ; WAVE64-LABEL: name: uitofp_s32_to_s32_vs
     ; WAVE64: liveins: $sgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; WAVE64: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $exec
+    ; WAVE64: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; WAVE64: $vgpr0 = COPY [[V_CVT_F32_U32_e64_]]
     ; WAVE32-LABEL: name: uitofp_s32_to_s32_vs
     ; WAVE32: liveins: $sgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; WAVE32: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $exec
+    ; WAVE32: [[V_CVT_F32_U32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e64 [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; WAVE32: $vgpr0 = COPY [[V_CVT_F32_U32_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_UITOFP %0
@@ -67,15 +67,15 @@ body: |
     ; WAVE64-LABEL: name: uitofp_s32_to_s16_vv
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $exec
+    ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
     ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     ; WAVE32-LABEL: name: uitofp_s32_to_s16_vv
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $exec
+    ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
     ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_UITOFP %0
@@ -96,15 +96,15 @@ body: |
     ; WAVE64-LABEL: name: uitofp_s32_to_s16_vs
     ; WAVE64: liveins: $sgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $exec
+    ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
     ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     ; WAVE32-LABEL: name: uitofp_s32_to_s16_vs
     ; WAVE32: liveins: $sgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $exec
+    ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
+    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
     ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s16) = G_UITOFP %0

diff  --git a/llvm/test/CodeGen/AMDGPU/bundle-latency.mir b/llvm/test/CodeGen/AMDGPU/bundle-latency.mir
index 603d0cf33f90..2bb21dec55a2 100644
--- a/llvm/test/CodeGen/AMDGPU/bundle-latency.mir
+++ b/llvm/test/CodeGen/AMDGPU/bundle-latency.mir
@@ -13,14 +13,14 @@ body:             |
     ; GCN:   $vgpr0 = GLOBAL_LOAD_DWORD undef $vgpr3_vgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN:   $vgpr1 = GLOBAL_LOAD_DWORD undef $vgpr3_vgpr4, 4, 0, 0, 0, implicit $exec
     ; GCN: }
-    ; GCN: $vgpr6 = V_ADD_F32_e32 killed $vgpr0, $vgpr0, implicit $exec
-    ; GCN: $vgpr5 = V_ADD_F32_e32 killed $vgpr1, $vgpr1, implicit $exec
+    ; GCN: $vgpr6 = V_ADD_F32_e32 killed $vgpr0, $vgpr0, implicit $mode, implicit $exec
+    ; GCN: $vgpr5 = V_ADD_F32_e32 killed $vgpr1, $vgpr1, implicit $mode, implicit $exec
     $vgpr0, $vgpr1 = BUNDLE undef $vgpr3_vgpr4, implicit $exec {
       $vgpr0 = GLOBAL_LOAD_DWORD undef $vgpr3_vgpr4, 0, 0, 0, 0, implicit $exec
       $vgpr1 = GLOBAL_LOAD_DWORD undef $vgpr3_vgpr4, 4, 0, 0, 0, implicit $exec
     }
-    $vgpr5 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $exec
-    $vgpr6 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $exec
+    $vgpr5 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
+    $vgpr6 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $mode, implicit $exec
 ...
 
 ---
@@ -29,14 +29,14 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     ; GCN-LABEL: name: dst_bundle_latency
-    ; GCN: $vgpr1 = V_ADD_F32_e32 undef $vgpr6, undef $vgpr6, implicit $exec
-    ; GCN: $vgpr0 = V_ADD_F32_e32 undef $vgpr5, undef $vgpr5, implicit $exec
+    ; GCN: $vgpr1 = V_ADD_F32_e32 undef $vgpr6, undef $vgpr6, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = V_ADD_F32_e32 undef $vgpr5, undef $vgpr5, implicit $mode, implicit $exec
     ; GCN: BUNDLE killed $vgpr0, killed $vgpr1, undef $vgpr3_vgpr4, implicit $exec {
     ; GCN:   GLOBAL_STORE_DWORD undef $vgpr3_vgpr4, killed $vgpr1, 0, 0, 0, 0, implicit $exec
     ; GCN:   GLOBAL_STORE_DWORD undef $vgpr3_vgpr4, killed $vgpr0, 4, 0, 0, 0, implicit $exec
     ; GCN: }
-    $vgpr0 = V_ADD_F32_e32 undef $vgpr5, undef $vgpr5, implicit $exec
-    $vgpr1 = V_ADD_F32_e32 undef $vgpr6, undef $vgpr6, implicit $exec
+    $vgpr0 = V_ADD_F32_e32 undef $vgpr5, undef $vgpr5, implicit $mode, implicit $exec
+    $vgpr1 = V_ADD_F32_e32 undef $vgpr6, undef $vgpr6, implicit $mode, implicit $exec
     BUNDLE $vgpr0, $vgpr1, undef $vgpr3_vgpr4, implicit $exec {
       GLOBAL_STORE_DWORD undef $vgpr3_vgpr4, $vgpr1, 0, 0, 0, 0, implicit $exec
       GLOBAL_STORE_DWORD undef $vgpr3_vgpr4, $vgpr0, 4, 0, 0, 0, implicit $exec

diff  --git a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
index f631bcd25811..f78ad501cebf 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands  %s -o - | FileCheck -check-prefix=GCN %s
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $mode, implicit $exec
 
 name:            v_max_self_clamp_not_set_f32
 tracksRegLiveness: true
@@ -56,16 +56,16 @@ body:             |
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
     %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 
 ...
 ---
 # GCN-LABEL: name: v_clamp_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $mode, implicit $exec
 name:            v_clamp_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -118,8 +118,8 @@ body:             |
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
     %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 ...
@@ -127,8 +127,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has omod set
 
 # GCN-LABEL: name: v_omod_mul_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $mode, implicit $exec
 name:            v_omod_mul_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -181,8 +181,8 @@ body:             |
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
     %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 
@@ -191,8 +191,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $mode, implicit $exec
 
 name:            v_omod_mul_clamp_already_set_f32
 tracksRegLiveness: true
@@ -246,8 +246,8 @@ body:             |
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
     %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 
@@ -269,8 +269,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has omod set
 
 # GCN-LABEL: name: v_omod_add_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $mode, implicit $exec
 name:            v_omod_add_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -323,8 +323,8 @@ body:             |
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
     %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 
@@ -333,8 +333,8 @@ body:             |
 # Don't fold a mul that looks like an omod if itself has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_add_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $mode, implicit $exec
 
 name:            v_omod_add_clamp_already_set_f32
 tracksRegLiveness: true
@@ -388,8 +388,8 @@ body:             |
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
     %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
-    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 
@@ -407,6 +407,6 @@ body:             |
     liveins: $vgpr0
 
     %0 = COPY $vgpr0
-    %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit $exec
+    %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit $mode, implicit $exec
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
index a187cd11ed91..80a201bbfdd0 100644
--- a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
+++ b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
@@ -15,6 +15,6 @@ body:             |
   bb.0:
     %0 = IMPLICIT_DEF
     %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $mode, implicit $exec
     %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
index 96f35605b1c9..1ef5c1098b63 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir
@@ -22,10 +22,10 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
-    %0:vgpr_32 = V_MUL_F32_e32 0, undef %1:vgpr_32, implicit $exec
-    %2:vgpr_32 = V_CVT_U32_F32_e32 killed %0, implicit $exec
-    %3:vgpr_32 = V_CVT_F32_I32_e32 killed %2, implicit $exec
-    %4:vgpr_32 = V_CVT_U32_F32_e32 killed %3, implicit $exec
+    %0:vgpr_32 = nofpexcept V_MUL_F32_e32 0, undef %1:vgpr_32, implicit $mode, implicit $exec
+    %2:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 killed %0, implicit $mode, implicit $exec
+    %3:vgpr_32 = nofpexcept V_CVT_F32_I32_e32 killed %2, implicit $mode, implicit $exec
+    %4:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 killed %3, implicit $mode, implicit $exec
     %5:vgpr_32 = V_LSHRREV_B32_e32 4, killed %4, implicit $exec
     S_CBRANCH_SCC0 %bb.2, implicit undef $scc
 
@@ -126,7 +126,7 @@ body:             |
     %27.sub6:sgpr_256 = COPY %26
     %27.sub7:sgpr_256 = COPY killed %26
     %28:vgpr_32 = IMAGE_LOAD_V1_V4 killed %25, killed %27, 2, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
-    %29:vgpr_32 = V_ADD_F32_e32 0, killed %28, implicit $exec
+    %29:vgpr_32 = nofpexcept V_ADD_F32_e32 0, killed %28, implicit $mode, implicit $exec
     $m0 = S_MOV_B32 -1
     DS_WRITE_B32 undef %30:vgpr_32, killed %29, 0, 0, implicit $m0, implicit $exec :: (store 4 into `i32 addrspace(3)* undef`, addrspace 3)
     S_ENDPGM 0

diff  --git a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
index 8bcff8a99f45..848011a8faac 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-prune-error.mir
@@ -41,10 +41,10 @@ body:             |
 
   bb.2:
     successors: %bb.4(0x80000000)
-    %6:vgpr_32 = V_MUL_F32_e32 1031798784, undef %7:vgpr_32, implicit $exec
-    %8:vgpr_32 = V_FLOOR_F32_e32 killed %6, implicit $exec
-    %9:vgpr_32 = V_ADD_F32_e32 0, killed %8, implicit $exec
-    %10:vgpr_32 = V_CVT_U32_F32_e32 killed %9, implicit $exec
+    %6:vgpr_32 = nofpexcept V_MUL_F32_e32 1031798784, undef %7:vgpr_32, implicit $mode, implicit $exec
+    %8:vgpr_32 = nofpexcept V_FLOOR_F32_e32 killed %6, implicit $mode, implicit $exec
+    %9:vgpr_32 = nofpexcept V_ADD_F32_e32 0, killed %8, implicit $mode, implicit $exec
+    %10:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 killed %9, implicit $mode, implicit $exec
     %11:vgpr_32 = V_LSHLREV_B32_e32 1, killed %10, implicit $exec
     %12:sreg_64 = S_MOV_B64 0
     %13:sgpr_128 = COPY killed %2
@@ -243,8 +243,8 @@ body:             |
     S_BRANCH %bb.3
 
   bb.17:
-    %105:vgpr_32 = V_ADD_F32_e32 target-flags(amdgpu-rel32-lo) 0, %20.sub3, implicit $exec
-    %106:vgpr_32 = V_ADD_F32_e32 target-flags(amdgpu-gotprel32-hi) 0, killed %20.sub2, implicit $exec
+    %105:vgpr_32 = nofpexcept V_ADD_F32_e32 target-flags(amdgpu-rel32-lo) 0, %20.sub3, implicit $mode, implicit $exec
+    %106:vgpr_32 = nofpexcept V_ADD_F32_e32 target-flags(amdgpu-gotprel32-hi) 0, killed %20.sub2, implicit $mode, implicit $exec
     undef %107.sub0:vreg_64 = COPY killed %106
     %107.sub1:vreg_64 = COPY killed %105
     $exec = S_AND_B64 $exec, killed %0, implicit-def dead $scc
@@ -258,11 +258,11 @@ body:             |
     %109.sub6:sgpr_256 = COPY %108
     %109.sub7:sgpr_256 = COPY killed %108
     %110:vgpr_32 = IMAGE_SAMPLE_V1_V2 killed %107, killed %109, undef %111:sgpr_128, 8, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
-    %112:vgpr_32 = V_MUL_F32_e32 0, killed %110, implicit $exec
-    %113:vgpr_32 = V_MUL_F32_e32 0, killed %112, implicit $exec
-    %114:vgpr_32 = V_MAD_F32 0, killed %113, 0, 0, 0, 0, 0, 0, implicit $exec
-    %115:vgpr_32 = V_MAX_F32_e32 0, killed %114, implicit $exec
-    %116:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, killed %115, 0, 1065353216, 0, 0, implicit $exec
+    %112:vgpr_32 = nofpexcept V_MUL_F32_e32 0, killed %110, implicit $mode, implicit $exec
+    %113:vgpr_32 = nofpexcept V_MUL_F32_e32 0, killed %112, implicit $mode, implicit $exec
+    %114:vgpr_32 = nofpexcept V_MAD_F32 0, killed %113, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %115:vgpr_32 = nofpexcept V_MAX_F32_e32 0, killed %114, implicit $mode, implicit $exec
+    %116:vgpr_32 = nofpexcept V_CVT_PKRTZ_F16_F32_e64 0, killed %115, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
     EXP 0, undef %117:vgpr_32, killed %116, undef %118:vgpr_32, undef %119:vgpr_32, -1, -1, 15, implicit $exec
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/coalescer-subregjoin-fullcopy.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subregjoin-fullcopy.mir
index c3a945716f77..47ecb6c58538 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subregjoin-fullcopy.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subregjoin-fullcopy.mir
@@ -72,9 +72,9 @@ body: |
     %12:vreg_128 = BUFFER_LOAD_DWORDX4_OFFSET killed %11, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     undef %13.sub1:vreg_128 = COPY %9.sub1
     %13.sub2:vreg_128 = COPY %9.sub2
-    %14:sreg_64 = V_CMP_GT_F32_e64 0, target-flags(amdgpu-rel32-lo) 0, 0, killed %12.sub3, 0, implicit $exec
-    %15:vgpr_32 = V_ADD_F32_e32 1065353216, undef %16:vgpr_32, implicit $exec
-    %17:sreg_64 = V_CMP_GT_F32_e64 0, 0, 0, killed %15, 0, implicit $exec
+    %14:sreg_64 = nofpexcept V_CMP_GT_F32_e64 0, target-flags(amdgpu-rel32-lo) 0, 0, killed %12.sub3, 0, implicit $mode, implicit $exec
+    %15:vgpr_32 = nofpexcept V_ADD_F32_e32 1065353216, undef %16:vgpr_32, implicit $mode, implicit $exec
+    %17:sreg_64 = nofpexcept V_CMP_GT_F32_e64 0, 0, 0, killed %15, 0, implicit $mode, implicit $exec
     %18:sreg_64 = S_AND_B64 killed %17, killed %14, implicit-def dead $scc
     %19:sreg_64 = COPY %10
     %20:vreg_128 = COPY %13
@@ -127,8 +127,8 @@ body: |
 
   bb.13:
     successors: %bb.14(0x80000000)
-    %32:vgpr_32 = V_MUL_F32_e32 undef %33:vgpr_32, killed %30.sub1, implicit $exec
-    %34:vgpr_32 = V_MUL_F32_e32 undef %35:vgpr_32, killed %32, implicit $exec
+    %32:vgpr_32 = nofpexcept V_MUL_F32_e32 undef %33:vgpr_32, killed %30.sub1, implicit $mode, implicit $exec
+    %34:vgpr_32 = nofpexcept V_MUL_F32_e32 undef %35:vgpr_32, killed %32, implicit $mode, implicit $exec
     undef %36.sub0:vreg_128 = COPY %34
     %31:vreg_128 = COPY killed %36
 
@@ -144,30 +144,30 @@ body: |
 
   bb.16:
     successors: %bb.17(0x80000000)
-    %39:vgpr_32 = V_FMA_F32 0, undef %40:vgpr_32, 0, killed %37.sub0, 0, undef %41:vgpr_32, 0, 0, implicit $exec
-    %42:vgpr_32 = V_FMA_F32 0, undef %43:vgpr_32, 0, undef %44:vgpr_32, 0, killed %39, 0, 0, implicit $exec
-    %45:vgpr_32 = V_FMA_F32 0, undef %46:vgpr_32, 0, undef %47:vgpr_32, 0, killed %42, 0, 0, implicit $exec
-    dead %48:vgpr_32 = V_MUL_F32_e32 undef %49:vgpr_32, killed %45, implicit $exec
-    %50:vgpr_32 = V_MUL_F32_e32 undef %51:vgpr_32, undef %52:vgpr_32, implicit $exec
+    %39:vgpr_32 = nofpexcept V_FMA_F32 0, undef %40:vgpr_32, 0, killed %37.sub0, 0, undef %41:vgpr_32, 0, 0, implicit $mode, implicit $exec
+    %42:vgpr_32 = nofpexcept V_FMA_F32 0, undef %43:vgpr_32, 0, undef %44:vgpr_32, 0, killed %39, 0, 0, implicit $mode, implicit $exec
+    %45:vgpr_32 = nofpexcept V_FMA_F32 0, undef %46:vgpr_32, 0, undef %47:vgpr_32, 0, killed %42, 0, 0, implicit $mode, implicit $exec
+    dead %48:vgpr_32 = nofpexcept V_MUL_F32_e32 undef %49:vgpr_32, killed %45, implicit $mode, implicit $exec
+    %50:vgpr_32 = nofpexcept V_MUL_F32_e32 undef %51:vgpr_32, undef %52:vgpr_32, implicit $mode, implicit $exec
     undef %53.sub1:vreg_128 = COPY %50
     %38:vreg_128 = COPY killed %53
 
   bb.17:
     %54:vreg_128 = COPY killed %38
-    %55:vgpr_32 = V_FMA_F32 0, killed %54.sub1, 0, target-flags(amdgpu-gotprel32-lo) 1056964608, 0, 1056964608, 0, 0, implicit $exec
+    %55:vgpr_32 = nofpexcept V_FMA_F32 0, killed %54.sub1, 0, target-flags(amdgpu-gotprel32-lo) 1056964608, 0, 1056964608, 0, 0, implicit $mode, implicit $exec
     EXP 1, undef %56:vgpr_32, killed %55, undef %57:vgpr_32, undef %58:vgpr_32, -1, 0, 15, implicit $exec
     S_ENDPGM 0
 
   bb.18:
     successors: %bb.7(0x80000000)
-    dead %59:vgpr_32 = V_FMA_F32 0, killed %9.sub2, 0, undef %60:vgpr_32, 0, undef %61:vgpr_32, 0, 0, implicit $exec
+    dead %59:vgpr_32 = nofpexcept V_FMA_F32 0, killed %9.sub2, 0, undef %60:vgpr_32, 0, undef %61:vgpr_32, 0, 0, implicit $mode, implicit $exec
     dead %62:vgpr_32 = BUFFER_LOAD_DWORD_OFFEN undef %63:vgpr_32, undef %64:sgpr_128, undef %65:sreg_32, 0, 0, 0, 0, 0, 0, implicit $exec
     undef %66.sub1:vreg_128 = COPY %13.sub1
     %66.sub2:vreg_128 = COPY %13.sub2
-    %67:sreg_64 = V_CMP_NGT_F32_e64 0, 0, 0, undef %68:vgpr_32, 0, implicit $exec
-    %69:vgpr_32 = V_ADD_F32_e32 1065353216, undef %70:vgpr_32, implicit $exec
-    %71:vgpr_32 = V_ADD_F32_e32 1065353216, killed %69, implicit $exec
-    %72:sreg_64 = V_CMP_NGT_F32_e64 0, 0, 0, killed %71, 0, implicit $exec
+    %67:sreg_64 = nofpexcept V_CMP_NGT_F32_e64 0, 0, 0, undef %68:vgpr_32, 0, implicit $mode, implicit $exec
+    %69:vgpr_32 = nofpexcept V_ADD_F32_e32 1065353216, undef %70:vgpr_32, implicit $mode, implicit $exec
+    %71:vgpr_32 = nofpexcept V_ADD_F32_e32 1065353216, killed %69, implicit $mode, implicit $exec
+    %72:sreg_64 = nofpexcept V_CMP_NGT_F32_e64 0, 0, 0, killed %71, 0, implicit $mode, implicit $exec
     %73:sreg_64 = S_OR_B64 killed %72, killed %67, implicit-def dead $scc
     %74:sreg_64 = S_OR_B64 killed %73, killed %10, implicit-def dead $scc
     %19:sreg_64 = COPY killed %74

diff  --git a/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir b/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
index a666428ded91..85dcacb93ffc 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-with-subregs-bad-identical.mir
@@ -48,11 +48,11 @@ body: |
     %4.sub6:sgpr_256 = COPY %1
     %4.sub7:sgpr_256 = COPY killed %1
     %5:vgpr_32 = IMAGE_LOAD_V1_V4 killed %3, killed %4, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
-    %6:vgpr_32 = V_MAD_F32 0, killed %5, 0, 0, 0, 0, 0, 0, implicit $exec
-    %7:vgpr_32 = V_RCP_F32_e32 killed %6, implicit $exec
-    %8:vgpr_32 = V_MUL_F32_e32 0, killed %7, implicit $exec
-    %9:vgpr_32 = V_MAD_F32 0, killed %8, 0, 0, 0, 0, 0, 0, implicit $exec
-    dead %10:vgpr_32 = V_MAC_F32_e32 undef %11:vgpr_32, undef %12:vgpr_32, undef %10, implicit $exec
+    %6:vgpr_32 = nofpexcept V_MAD_F32 0, killed %5, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %7:vgpr_32 = nofpexcept V_RCP_F32_e32 killed %6, implicit $mode, implicit $exec
+    %8:vgpr_32 = nofpexcept V_MUL_F32_e32 0, killed %7, implicit $mode, implicit $exec
+    %9:vgpr_32 = nofpexcept V_MAD_F32 0, killed %8, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    dead %10:vgpr_32 = nofpexcept V_MAC_F32_e32 undef %11:vgpr_32, undef %12:vgpr_32, undef %10, implicit $mode, implicit $exec
     undef %13.sub0:vreg_128 = COPY %9
     %14:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
     S_CBRANCH_SCC0 %bb.4, implicit undef $scc
@@ -65,12 +65,12 @@ body: |
 
   bb.4:
     successors: %bb.5(0x40000000), %bb.7(0x40000000)
-    %17:vgpr_32 = V_MAD_F32 0, killed %9, 0, 0, 0, 0, 0, 0, implicit $exec
-    %18:vgpr_32 = V_MIN_F32_e32 1065353216, killed %17, implicit $exec
-    %19:sreg_64_xexec = V_CMP_NEQ_F32_e64 0, 1065353216, 0, killed %18, 0, implicit $exec
+    %17:vgpr_32 = nofpexcept V_MAD_F32 0, killed %9, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %18:vgpr_32 = nofpexcept V_MIN_F32_e32 1065353216, killed %17, implicit $mode, implicit $exec
+    %19:sreg_64_xexec = nofpexcept V_CMP_NEQ_F32_e64 0, 1065353216, 0, killed %18, 0, implicit $mode, implicit $exec
     %20:vgpr_32 = V_MOV_B32_e32 2143289344, implicit $exec
     %21:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, killed %20, killed %19, implicit $exec
-    %22:sreg_64 = V_CMP_LT_F32_e64 0, 0, 0, killed %21, 0, implicit $exec
+    %22:sreg_64 = nofpexcept V_CMP_LT_F32_e64 0, 0, 0, killed %21, 0, implicit $mode, implicit $exec
     %23:sreg_64 = COPY $exec, implicit-def $exec
     %24:sreg_64 = S_AND_B64 %23, %22, implicit-def dead $scc
     $exec = S_MOV_B64_term killed %24
@@ -140,11 +140,11 @@ body: |
 
   bb.14:
     successors: %bb.15(0x40000000), %bb.16(0x40000000)
-    %38:vgpr_32 = V_MAD_F32 0, killed %36.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $exec
-    %39:vgpr_32 = V_MAD_F32 0, killed %38, 0, 0, 0, 0, 0, 0, implicit $exec
-    %40:vgpr_32 = V_MAD_F32 0, killed %39, 0, -1090519040, 0, 1056964608, 0, 0, implicit $exec
-    %41:vgpr_32 = V_MAD_F32 0, killed %40, 0, 0, 0, -1090519040, 0, 0, implicit $exec
-    %42:vgpr_32 = V_CVT_I32_F32_e32 killed %41, implicit $exec
+    %38:vgpr_32 = nofpexcept V_MAD_F32 0, killed %36.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %39:vgpr_32 = nofpexcept V_MAD_F32 0, killed %38, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %40:vgpr_32 = nofpexcept V_MAD_F32 0, killed %39, 0, -1090519040, 0, 1056964608, 0, 0, implicit $mode, implicit $exec
+    %41:vgpr_32 = nofpexcept V_MAD_F32 0, killed %40, 0, 0, 0, -1090519040, 0, 0, implicit $mode, implicit $exec
+    %42:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 killed %41, implicit $mode, implicit $exec
     %43:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %44:sgpr_128, 12, 0, 0 :: (dereferenceable invariant load 4)
     %45:vgpr_32 = V_MUL_LO_I32 killed %42, killed %43, implicit $exec
     %46:vgpr_32 = V_LSHLREV_B32_e32 2, killed %45, implicit $exec

diff  --git a/llvm/test/CodeGen/AMDGPU/couldnt-join-subrange-3.mir b/llvm/test/CodeGen/AMDGPU/couldnt-join-subrange-3.mir
index 1605880a59e4..91901b723376 100644
--- a/llvm/test/CodeGen/AMDGPU/couldnt-join-subrange-3.mir
+++ b/llvm/test/CodeGen/AMDGPU/couldnt-join-subrange-3.mir
@@ -17,9 +17,9 @@ body:             |
   ; GCN-LABEL: name: _amdgpu_ps_main
   ; GCN: bb.0:
   ; GCN:   successors: %bb.1(0x80000000)
-  ; GCN:   [[V_TRUNC_F32_e32_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e32 undef %4:vgpr_32, implicit $exec
-  ; GCN:   [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_TRUNC_F32_e32_]], implicit $exec
-  ; GCN:   [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 4, [[V_CVT_U32_F32_e32_]], implicit $exec
+  ; GCN:   %3:vgpr_32 = nofpexcept V_TRUNC_F32_e32 undef %4:vgpr_32, implicit $mode, implicit $exec
+  ; GCN:   %5:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %3, implicit $mode, implicit $exec
+  ; GCN:   [[V_LSHRREV_B32_e32_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e32 4, %5, implicit $exec
   ; GCN:   undef %11.sub0:vreg_128 = V_MUL_LO_I32 [[V_LSHRREV_B32_e32_]], 3, implicit $exec
   ; GCN:   %11.sub3:vreg_128 = COPY %11.sub0
   ; GCN:   [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
@@ -47,19 +47,19 @@ body:             |
   ; GCN:   S_CBRANCH_VCCNZ %bb.4, implicit killed $vcc
   ; GCN:   S_BRANCH %bb.6
   ; GCN: bb.5:
-  ; GCN:   [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 target-flags(amdgpu-gotprel) 0, %11.sub0, implicit $exec
-  ; GCN:   [[V_MIN_F32_e32_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e32 1106771968, [[V_MUL_F32_e32_]], implicit $exec
-  ; GCN:   [[V_MAD_F32_:%[0-9]+]]:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, [[V_MIN_F32_e32_]], 0, 0, 0, 0, 0, 0, implicit $exec
-  ; GCN:   [[V_MAD_F32_1:%[0-9]+]]:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, [[V_MAD_F32_]], 0, 0, 0, 0, 0, 0, implicit $exec
-  ; GCN:   [[V_MAD_F32_2:%[0-9]+]]:vgpr_32 = V_MAD_F32 0, [[V_MAD_F32_1]], 0, 0, 0, 0, 0, 0, implicit $exec
-  ; GCN:   [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[V_MAD_F32_2]], 0, undef %27:vgpr_32, 0, 0, implicit $exec
-  ; GCN:   EXP_DONE 0, [[V_CVT_PKRTZ_F16_F32_e64_]], undef %28:vgpr_32, undef %29:vgpr_32, undef %30:vgpr_32, -1, -1, 15, implicit $exec
+  ; GCN:   %21:vgpr_32 = nofpexcept V_MUL_F32_e32 target-flags(amdgpu-gotprel) 0, %11.sub0, implicit $mode, implicit $exec
+  ; GCN:   %22:vgpr_32 = nofpexcept V_MIN_F32_e32 1106771968, %21, implicit $mode, implicit $exec
+  ; GCN:   %23:vgpr_32 = nnan arcp contract reassoc nofpexcept V_MAD_F32 0, %22, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+  ; GCN:   %24:vgpr_32 = nnan arcp contract reassoc nofpexcept V_MAD_F32 0, %23, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+  ; GCN:   %25:vgpr_32 = nofpexcept V_MAD_F32 0, %24, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+  ; GCN:   %26:vgpr_32 = nofpexcept V_CVT_PKRTZ_F16_F32_e64 0, %25, 0, undef %27:vgpr_32, 0, 0, implicit $mode, implicit $exec
+  ; GCN:   EXP_DONE 0, %26, undef %28:vgpr_32, undef %29:vgpr_32, undef %30:vgpr_32, -1, -1, 15, implicit $exec
   ; GCN:   S_ENDPGM 0
   ; GCN: bb.6:
   ; GCN:   S_ENDPGM 0
   bb.0:
-    %10:vgpr_32 = V_TRUNC_F32_e32 undef %11:vgpr_32, implicit $exec
-    %12:vgpr_32 = V_CVT_U32_F32_e32 killed %10, implicit $exec
+    %10:vgpr_32 = nofpexcept V_TRUNC_F32_e32 undef %11:vgpr_32, implicit $mode, implicit $exec
+    %12:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 killed %10, implicit $mode, implicit $exec
     %50:vgpr_32 = V_LSHRREV_B32_e32 4, killed %12, implicit $exec
     %51:vgpr_32 = V_MUL_LO_I32 killed %50, 3, implicit $exec
     undef %52.sub0:vreg_128 = COPY %51
@@ -102,12 +102,12 @@ body:             |
     S_BRANCH %bb.6
 
   bb.5:
-    %39:vgpr_32 = V_MUL_F32_e32 target-flags(amdgpu-gotprel) 0, killed %55.sub0, implicit $exec
-    %41:vgpr_32 = V_MIN_F32_e32 1106771968, killed %39, implicit $exec
-    %42:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, killed %41, 0, 0, 0, 0, 0, 0, implicit $exec
-    %43:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, killed %42, 0, 0, 0, 0, 0, 0, implicit $exec
-    %44:vgpr_32 = V_MAD_F32 0, killed %43, 0, 0, 0, 0, 0, 0, implicit $exec
-    %45:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, killed %44, 0, undef %46:vgpr_32, 0, 0, implicit $exec
+    %39:vgpr_32 = nofpexcept V_MUL_F32_e32 target-flags(amdgpu-gotprel) 0, killed %55.sub0, implicit $mode, implicit $exec
+    %41:vgpr_32 = nofpexcept V_MIN_F32_e32 1106771968, killed %39, implicit $mode, implicit $exec
+    %42:vgpr_32 = nnan arcp contract reassoc nofpexcept V_MAD_F32 0, killed %41, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %43:vgpr_32 = nnan arcp contract reassoc nofpexcept V_MAD_F32 0, killed %42, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %44:vgpr_32 = nofpexcept V_MAD_F32 0, killed %43, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %45:vgpr_32 = nofpexcept V_CVT_PKRTZ_F16_F32_e64 0, killed %44, 0, undef %46:vgpr_32, 0, 0, implicit $mode, implicit $exec
     EXP_DONE 0, killed %45, undef %47:vgpr_32, undef %48:vgpr_32, undef %49:vgpr_32, -1, -1, 15, implicit $exec
     S_ENDPGM 0
 

diff  --git a/llvm/test/CodeGen/AMDGPU/dead-lane.mir b/llvm/test/CodeGen/AMDGPU/dead-lane.mir
index 1477c3302c34..8e95009e72c6 100644
--- a/llvm/test/CodeGen/AMDGPU/dead-lane.mir
+++ b/llvm/test/CodeGen/AMDGPU/dead-lane.mir
@@ -2,15 +2,15 @@
 
 # GCN-LABEL: name: dead_lane
 # GCN:      bb.0:
-# GCN-NEXT: undef %3.sub0:vreg_64 = V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %3.sub0, implicit $exec
+# GCN-NEXT: undef %3.sub0:vreg_64 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %3.sub0, implicit $mode, implicit $exec
 # GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %3.sub0,
 ---
 name:            dead_lane
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %1:vgpr_32 = V_MAC_F32_e32 undef %0:vgpr_32, undef %0:vgpr_32, undef %0:vgpr_32, implicit $exec
-    %2:vgpr_32 = V_MAC_F32_e32 undef %0:vgpr_32, undef %0:vgpr_32, undef %0:vgpr_32, implicit $exec
+    %1:vgpr_32 = nofpexcept V_MAC_F32_e32 undef %0:vgpr_32, undef %0:vgpr_32, undef %0:vgpr_32, implicit $mode, implicit $exec
+    %2:vgpr_32 = nofpexcept V_MAC_F32_e32 undef %0:vgpr_32, undef %0:vgpr_32, undef %0:vgpr_32, implicit $mode, implicit $exec
     %3:vreg_64 = REG_SEQUENCE %1:vgpr_32, %subreg.sub0, %2:vgpr_32, %subreg.sub1
     FLAT_STORE_DWORD undef %4:vreg_64, %3.sub0, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM 0

diff  --git a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir
index 3a35c558e6ac..2743f766ddee 100644
--- a/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir
+++ b/llvm/test/CodeGen/AMDGPU/debug-value-scheduler-crash.mir
@@ -32,7 +32,7 @@ body:             |
   ; CHECK:   [[DEF6:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[DEF7:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[DEF8:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-  ; CHECK:   [[V_MUL_F32_e32_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 1082130432, [[DEF1]], implicit $exec
+  ; CHECK:   %9:vgpr_32 = nofpexcept V_MUL_F32_e32 1082130432, [[DEF1]], implicit $mode, implicit $exec
   ; CHECK:   [[DEF9:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[DEF10:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK: bb.1:
@@ -48,29 +48,29 @@ body:             |
   ; CHECK:   [[DEF11:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[DEF12:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[COPY:%[0-9]+]]:vgpr_32 = COPY [[V_MOV_B32_e32_]]
-  ; CHECK:   [[V_MUL_F32_e32_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 [[DEF7]], [[DEF7]], implicit $exec
-  ; CHECK:   [[V_MUL_F32_e32_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 [[DEF7]], [[DEF7]], implicit $exec
-  ; CHECK:   [[V_MUL_F32_e32_3:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_]], implicit $exec
+  ; CHECK:   %16:vgpr_32 = nofpexcept V_MUL_F32_e32 [[DEF7]], [[DEF7]], implicit $mode, implicit $exec
+  ; CHECK:   %17:vgpr_32 = nofpexcept V_MUL_F32_e32 [[DEF7]], [[DEF7]], implicit $mode, implicit $exec
+  ; CHECK:   %18:vgpr_32 = nofpexcept V_MUL_F32_e32 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_]], implicit $mode, implicit $exec
   ; CHECK:   [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
   ; CHECK:   [[DEF13:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
-  ; CHECK:   [[V_ADD_F32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e32 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_]], implicit $exec
-  ; CHECK:   [[V_MUL_F32_e32_4:%[0-9]+]]:vgpr_32 = V_MUL_F32_e32 [[DEF7]], [[DEF7]], implicit $exec
-  ; CHECK:   dead %23:vgpr_32 = V_MUL_F32_e32 [[V_MUL_F32_e32_4]], [[DEF13]], implicit $exec
-  ; CHECK:   dead [[V_MOV_B32_e32_1]]:vgpr_32 = V_MAC_F32_e32 [[V_ADD_F32_e32_]], [[COPY]], [[V_MOV_B32_e32_1]], implicit $exec
+  ; CHECK:   %21:vgpr_32 = nofpexcept V_ADD_F32_e32 [[V_MOV_B32_e32_]], [[V_MOV_B32_e32_]], implicit $mode, implicit $exec
+  ; CHECK:   %22:vgpr_32 = nofpexcept V_MUL_F32_e32 [[DEF7]], [[DEF7]], implicit $mode, implicit $exec
+  ; CHECK:   dead %23:vgpr_32 = nofpexcept V_MUL_F32_e32 %22, [[DEF13]], implicit $mode, implicit $exec
+  ; CHECK:   dead [[V_MOV_B32_e32_1]]:vgpr_32 = nofpexcept V_MAC_F32_e32 %21, [[COPY]], [[V_MOV_B32_e32_1]], implicit $mode, implicit $exec
   ; CHECK:   [[DEF14:%[0-9]+]]:sreg_64 = IMPLICIT_DEF
   ; CHECK:   $sgpr4 = IMPLICIT_DEF
   ; CHECK:   $vgpr0 = COPY [[DEF11]]
   ; CHECK:   $vgpr0 = COPY [[V_MOV_B32_e32_]]
   ; CHECK:   $vgpr1 = COPY [[DEF7]]
-  ; CHECK:   $vgpr0 = COPY [[V_MUL_F32_e32_1]]
-  ; CHECK:   $vgpr1 = COPY [[V_MUL_F32_e32_2]]
-  ; CHECK:   $vgpr2 = COPY [[V_MUL_F32_e32_3]]
+  ; CHECK:   $vgpr0 = COPY %16
+  ; CHECK:   $vgpr1 = COPY %17
+  ; CHECK:   $vgpr2 = COPY %18
   ; CHECK:   dead $sgpr30_sgpr31 = SI_CALL [[DEF14]], @foo, csr_amdgpu_highregs, implicit undef $sgpr0_sgpr1_sgpr2_sgpr3, implicit killed $sgpr4, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit-def $vgpr0
-  ; CHECK:   [[V_ADD_F32_e32_1:%[0-9]+]]:vgpr_32 = V_ADD_F32_e32 [[V_MUL_F32_e32_]], [[DEF8]], implicit $exec
-  ; CHECK:   [[V_MAC_F32_e32_:%[0-9]+]]:vgpr_32 = V_MAC_F32_e32 [[DEF12]], [[DEF9]], [[V_MAC_F32_e32_]], implicit $exec
-  ; CHECK:   dead %26:vgpr_32 = V_MAD_F32 0, [[V_MAC_F32_e32_]], 0, [[DEF4]], 0, [[DEF1]], 0, 0, implicit $exec
-  ; CHECK:   dead %27:vgpr_32 = V_MAD_F32 0, [[V_MAC_F32_e32_]], 0, [[DEF5]], 0, [[DEF2]], 0, 0, implicit $exec
-  ; CHECK:   dead %28:vgpr_32 = V_MAD_F32 0, [[V_MAC_F32_e32_]], 0, [[DEF6]], 0, [[DEF3]], 0, 0, implicit $exec
+  ; CHECK:   %25:vgpr_32 = nofpexcept V_ADD_F32_e32 %9, [[DEF8]], implicit $mode, implicit $exec
+  ; CHECK:   %25:vgpr_32 = nofpexcept V_MAC_F32_e32 [[DEF12]], [[DEF9]], %25, implicit $mode, implicit $exec
+  ; CHECK:   dead %26:vgpr_32 = nofpexcept V_MAD_F32 0, %25, 0, [[DEF4]], 0, [[DEF1]], 0, 0, implicit $mode, implicit $exec
+  ; CHECK:   dead %27:vgpr_32 = nofpexcept V_MAD_F32 0, %25, 0, [[DEF5]], 0, [[DEF2]], 0, 0, implicit $mode, implicit $exec
+  ; CHECK:   dead %28:vgpr_32 = nofpexcept V_MAD_F32 0, %25, 0, [[DEF6]], 0, [[DEF3]], 0, 0, implicit $mode, implicit $exec
   ; CHECK:   GLOBAL_STORE_DWORD [[DEF]], [[DEF10]], 0, 0, 0, 0, implicit $exec
   ; CHECK:   S_ENDPGM 0
   bb.0:
@@ -85,7 +85,7 @@ body:             |
     %6:vgpr_32 = IMPLICIT_DEF
     %7:vgpr_32 = IMPLICIT_DEF
     %8:vgpr_32 = IMPLICIT_DEF
-    %9:vgpr_32 = V_MUL_F32_e32 1082130432, %1, implicit $exec
+    %9:vgpr_32 = nofpexcept V_MUL_F32_e32 1082130432, %1, implicit $mode, implicit $exec
     %10:vgpr_32 = IMPLICIT_DEF
     %11:vgpr_32 = IMPLICIT_DEF
 
@@ -106,15 +106,15 @@ body:             |
     %13:vgpr_32 = COPY %12
     %14:vgpr_32 = IMPLICIT_DEF
     %15:vgpr_32 = IMPLICIT_DEF
-    %16:vgpr_32 = V_MUL_F32_e32 %7, %7, implicit $exec
-    %17:vgpr_32 = V_MUL_F32_e32 %7, %7, implicit $exec
-    %18:vgpr_32 = V_MUL_F32_e32 %12, %12, implicit $exec
+    %16:vgpr_32 = nofpexcept V_MUL_F32_e32 %7, %7, implicit $mode, implicit $exec
+    %17:vgpr_32 = nofpexcept V_MUL_F32_e32 %7, %7, implicit $mode, implicit $exec
+    %18:vgpr_32 = nofpexcept V_MUL_F32_e32 %12, %12, implicit $mode, implicit $exec
     %19:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %20:vgpr_32 = IMPLICIT_DEF
-    %21:vgpr_32 = V_ADD_F32_e32 %12, %12, implicit $exec
-    %22:vgpr_32 = V_MUL_F32_e32 %7, %7, implicit $exec
-    %23:vgpr_32 = V_MUL_F32_e32 %22, %20, implicit $exec
-    %19:vgpr_32 = V_MAC_F32_e32 %21, %13, %19, implicit $exec
+    %21:vgpr_32 = nofpexcept V_ADD_F32_e32 %12, %12, implicit $mode, implicit $exec
+    %22:vgpr_32 = nofpexcept V_MUL_F32_e32 %7, %7, implicit $mode, implicit $exec
+    %23:vgpr_32 = nofpexcept V_MUL_F32_e32 %22, %20, implicit $mode, implicit $exec
+    %19:vgpr_32 = nofpexcept V_MAC_F32_e32 %21, %13, %19, implicit $mode, implicit $exec
     %24:sreg_64 = IMPLICIT_DEF
     $vgpr0 = COPY %14
     $vgpr0 = COPY %12
@@ -124,11 +124,11 @@ body:             |
     $vgpr2 = COPY %18
     $sgpr4 = IMPLICIT_DEF
     dead $sgpr30_sgpr31 = SI_CALL %24, @foo, csr_amdgpu_highregs, implicit undef $sgpr0_sgpr1_sgpr2_sgpr3, implicit killed $sgpr4, implicit killed $vgpr0, implicit killed $vgpr1, implicit killed $vgpr2, implicit-def $vgpr0
-    %25:vgpr_32 = V_ADD_F32_e32 %9, %8, implicit $exec
-    %25:vgpr_32 = V_MAC_F32_e32 %15, %10, %25, implicit $exec
-    %26:vgpr_32 = V_MAD_F32 0, %25, 0, %4, 0, %1, 0, 0, implicit $exec
-    %27:vgpr_32 = V_MAD_F32 0, %25, 0, %5, 0, %2, 0, 0, implicit $exec
-    %28:vgpr_32 = V_MAD_F32 0, %25, 0, %6, 0, %3, 0, 0, implicit $exec
+    %25:vgpr_32 = nofpexcept V_ADD_F32_e32 %9, %8, implicit $mode, implicit $exec
+    %25:vgpr_32 = nofpexcept V_MAC_F32_e32 %15, %10, %25, implicit $mode, implicit $exec
+    %26:vgpr_32 = nofpexcept V_MAD_F32 0, %25, 0, %4, 0, %1, 0, 0, implicit $mode, implicit $exec
+    %27:vgpr_32 = nofpexcept V_MAD_F32 0, %25, 0, %5, 0, %2, 0, 0, implicit $mode, implicit $exec
+    %28:vgpr_32 = nofpexcept V_MAD_F32 0, %25, 0, %6, 0, %3, 0, 0, implicit $mode, implicit $exec
     GLOBAL_STORE_DWORD %0, %11, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
 

diff  --git a/llvm/test/CodeGen/AMDGPU/dpp_combine.mir b/llvm/test/CodeGen/AMDGPU/dpp_combine.mir
index 859c21d8842f..358a331da1a5 100644
--- a/llvm/test/CodeGen/AMDGPU/dpp_combine.mir
+++ b/llvm/test/CodeGen/AMDGPU/dpp_combine.mir
@@ -298,10 +298,10 @@ body:             |
 # check for floating point modifiers
 # GCN-LABEL: name: add_f32_e64
 # GCN: %3:vgpr_32 = V_MOV_B32_dpp undef %2, %1, 1, 15, 15, 1, implicit $exec
-# GCN: %4:vgpr_32 = V_ADD_F32_e64 0, %3, 0, %0, 0, 1, implicit $exec
-# GCN: %6:vgpr_32 = V_ADD_F32_dpp %2, 0, %1, 0, %0, 1, 15, 15, 1, implicit $exec
-# GCN: %8:vgpr_32 = V_ADD_F32_dpp %2, 1, %1, 2, %0, 1, 15, 15, 1, implicit $exec
-# GCN: %10:vgpr_32 = V_ADD_F32_e64 4, %9, 8, %0, 0, 0, implicit $exec
+# GCN: %4:vgpr_32 = V_ADD_F32_e64 0, %3, 0, %0, 0, 1, implicit $mode, implicit $exec
+# GCN: %6:vgpr_32 = V_ADD_F32_dpp %2, 0, %1, 0, %0, 1, 15, 15, 1, implicit $mode, implicit $exec
+# GCN: %8:vgpr_32 = V_ADD_F32_dpp %2, 1, %1, 2, %0, 1, 15, 15, 1, implicit $mode, implicit $exec
+# GCN: %10:vgpr_32 = V_ADD_F32_e64 4, %9, 8, %0, 0, 0, implicit $mode, implicit $exec
 
 name: add_f32_e64
 tracksRegLiveness: true
@@ -315,19 +315,19 @@ body:             |
 
     ; this shouldn't be combined as omod is set
     %3:vgpr_32 = V_MOV_B32_dpp undef %2, %1, 1, 15, 15, 1, implicit $exec
-    %4:vgpr_32 = V_ADD_F32_e64 0, %3, 0, %0, 0, 1, implicit $exec
+    %4:vgpr_32 = V_ADD_F32_e64 0, %3, 0, %0, 0, 1, implicit $mode, implicit $exec
 
     ; this should be combined as all modifiers are default
     %5:vgpr_32 = V_MOV_B32_dpp undef %2, %1, 1, 15, 15, 1, implicit $exec
-    %6:vgpr_32 = V_ADD_F32_e64 0, %5, 0, %0, 0, 0, implicit $exec
+    %6:vgpr_32 = V_ADD_F32_e64 0, %5, 0, %0, 0, 0, implicit $mode, implicit $exec
 
     ; this should be combined as modifiers other than abs|neg are default
     %7:vgpr_32 = V_MOV_B32_dpp undef %2, %1, 1, 15, 15, 1, implicit $exec
-    %8:vgpr_32 = V_ADD_F32_e64 1, %7, 2, %0, 0, 0, implicit $exec
+    %8:vgpr_32 = V_ADD_F32_e64 1, %7, 2, %0, 0, 0, implicit $mode, implicit $exec
 
     ; this shouldn't be combined as modifiers aren't abs|neg
     %9:vgpr_32 = V_MOV_B32_dpp undef %2, %1, 1, 15, 15, 1, implicit $exec
-    %10:vgpr_32 = V_ADD_F32_e64 4, %9, 8, %0, 0, 0, implicit $exec
+    %10:vgpr_32 = V_ADD_F32_e64 4, %9, 8, %0, 0, 0, implicit $mode, implicit $exec
 ...
 
 # check for e64 modifiers
@@ -532,73 +532,73 @@ body: |
 
 # Test instruction which does not have modifiers in VOP1 form but does in DPP form.
 # GCN-LABEL: name: dpp_vop1
-# GCN: %3:vgpr_32 = V_CEIL_F32_dpp %0, 0, undef %2:vgpr_32, 1, 15, 15, 1, implicit $exec
+# GCN: %3:vgpr_32 = V_CEIL_F32_dpp %0, 0, undef %2:vgpr_32, 1, 15, 15, 1, implicit $mode, implicit $exec
 name: dpp_vop1
 tracksRegLiveness: true
 body: |
   bb.0:
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = V_MOV_B32_dpp %1:vgpr_32, undef %0:vgpr_32, 1, 15, 15, 1, implicit $exec
-    %3:vgpr_32 = V_CEIL_F32_e32 %2, implicit $exec
+    %3:vgpr_32 = V_CEIL_F32_e32 %2, implicit $mode, implicit $exec
 ...
 
 # Test instruction which does not have modifiers in VOP2 form but does in DPP form.
 # GCN-LABEL: name: dpp_min
-# GCN: %3:vgpr_32 = V_MIN_F32_dpp %0, 0, undef %2:vgpr_32, 0, undef %4:vgpr_32, 1, 15, 15, 1, implicit $exec
+# GCN: %3:vgpr_32 = V_MIN_F32_dpp %0, 0, undef %2:vgpr_32, 0, undef %4:vgpr_32, 1, 15, 15, 1, implicit $mode, implicit $exec
 name: dpp_min
 tracksRegLiveness: true
 body: |
   bb.0:
     %1:vgpr_32 = IMPLICIT_DEF
     %2:vgpr_32 = V_MOV_B32_dpp %1:vgpr_32, undef %0:vgpr_32, 1, 15, 15, 1, implicit $exec
-    %4:vgpr_32 = V_MIN_F32_e32 %2, undef %3:vgpr_32, implicit $exec
+    %4:vgpr_32 = V_MIN_F32_e32 %2, undef %3:vgpr_32, implicit $mode, implicit $exec
 ...
 
 # Test an undef old operand
 # GCN-LABEL: name: dpp_undef_old
-# GCN: %3:vgpr_32 = V_CEIL_F32_dpp undef %1:vgpr_32, 0, undef %2:vgpr_32, 1, 15, 15, 1, implicit $exec
+# GCN: %3:vgpr_32 = V_CEIL_F32_dpp undef %1:vgpr_32, 0, undef %2:vgpr_32, 1, 15, 15, 1, implicit $mode, implicit $exec
 name: dpp_undef_old
 tracksRegLiveness: true
 body: |
   bb.0:
     %2:vgpr_32 = V_MOV_B32_dpp undef %1:vgpr_32, undef %0:vgpr_32, 1, 15, 15, 1, implicit $exec
-    %3:vgpr_32 = V_CEIL_F32_e32 %2, implicit $exec
+    %3:vgpr_32 = V_CEIL_F32_e32 %2, implicit $mode, implicit $exec
 ...
 
 # Do not combine a dpp mov which writes a physreg.
 # GCN-LABEL: name: phys_dpp_mov_dst
 # GCN: $vgpr0 = V_MOV_B32_dpp undef %0:vgpr_32, undef %1:vgpr_32, 1, 15, 15, 1, implicit $exec
-# GCN: %2:vgpr_32 = V_CEIL_F32_e32 $vgpr0, implicit $exec
+# GCN: %2:vgpr_32 = V_CEIL_F32_e32 $vgpr0, implicit $mode, implicit $exec
 name: phys_dpp_mov_dst
 tracksRegLiveness: true
 body: |
   bb.0:
     $vgpr0 = V_MOV_B32_dpp undef %1:vgpr_32, undef %0:vgpr_32, 1, 15, 15, 1, implicit $exec
-    %2:vgpr_32 = V_CEIL_F32_e32 $vgpr0, implicit $exec
+    %2:vgpr_32 = V_CEIL_F32_e32 $vgpr0, implicit $mode, implicit $exec
 ...
 
 # Do not combine a dpp mov which reads a physreg.
 # GCN-LABEL: name: phys_dpp_mov_old_src
 # GCN: %0:vgpr_32 = V_MOV_B32_dpp undef $vgpr0, undef %1:vgpr_32, 1, 15, 15, 1, implicit $exec
-# GCN: %2:vgpr_32 = V_CEIL_F32_e32 %0, implicit $exec
+# GCN: %2:vgpr_32 = V_CEIL_F32_e32 %0, implicit $mode, implicit $exec
 name: phys_dpp_mov_old_src
 tracksRegLiveness: true
 body: |
   bb.0:
     %1:vgpr_32 = V_MOV_B32_dpp undef $vgpr0, undef %0:vgpr_32, 1, 15, 15, 1, implicit $exec
-    %2:vgpr_32 = V_CEIL_F32_e32 %1, implicit $exec
+    %2:vgpr_32 = V_CEIL_F32_e32 %1, implicit $mode, implicit $exec
 ...
 
 # Do not combine a dpp mov which reads a physreg.
 # GCN-LABEL: name: phys_dpp_mov_src
 # GCN: %0:vgpr_32 = V_MOV_B32_dpp undef %1:vgpr_32, undef $vgpr0, 1, 15, 15, 1, implicit $exec
-# GCN: %2:vgpr_32 = V_CEIL_F32_e32 %0, implicit $exec
+# GCN: %2:vgpr_32 = V_CEIL_F32_e32 %0, implicit $mode, implicit $exec
 name: phys_dpp_mov_src
 tracksRegLiveness: true
 body: |
   bb.0:
     %1:vgpr_32 = V_MOV_B32_dpp undef %0:vgpr_32, undef $vgpr0, 1, 15, 15, 1, implicit $exec
-    %2:vgpr_32 = V_CEIL_F32_e32 %1, implicit $exec
+    %2:vgpr_32 = V_CEIL_F32_e32 %1, implicit $mode, implicit $exec
 ...
 
 # GCN-LABEL: name: dpp_reg_sequence_both_combined
@@ -817,7 +817,7 @@ body: |
 
 # Make sure flags aren't dropped
 # GCN-LABEL: name: flags_add_f32_e64
-# GCN: %4:vgpr_32 = nnan nofpexcept V_ADD_F32_dpp %2, 0, %1, 0, %0, 1, 15, 15, 1, implicit $exec
+# GCN: %4:vgpr_32 = nnan nofpexcept V_ADD_F32_dpp %2, 0, %1, 0, %0, 1, 15, 15, 1, implicit $mode, implicit $exec
 name: flags_add_f32_e64
 tracksRegLiveness: true
 body:             |
@@ -829,7 +829,7 @@ body:             |
     %2:vgpr_32 = IMPLICIT_DEF
 
     %3:vgpr_32 = V_MOV_B32_dpp undef %2, %1, 1, 15, 15, 1, implicit $exec
-    %4:vgpr_32 = nofpexcept nnan V_ADD_F32_e64 0, %3, 0, %0, 0, 0, implicit $exec
+    %4:vgpr_32 = nofpexcept nnan V_ADD_F32_e64 0, %3, 0, %0, 0, 0, implicit $mode, implicit $exec
     S_ENDPGM 0, implicit %4
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir b/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
index baa54b492f61..95a878c1997f 100644
--- a/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
+++ b/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
@@ -18,7 +18,7 @@ body:             |
     %3 = IMPLICIT_DEF
     $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $mode, implicit $exec
     %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM 0
 ...
@@ -42,7 +42,7 @@ body:             |
     %3 = IMPLICIT_DEF
     $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, 0, implicit $exec, implicit $flat_scr
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $mode, implicit $exec
     %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM 0
 ...
@@ -66,7 +66,7 @@ body:             |
     %3 = IMPLICIT_DEF
     $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $mode, implicit $exec
     %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM 0
 ...
@@ -173,7 +173,7 @@ body:             |
   bb.1:
     %0 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit $exec
+    %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit $mode, implicit $exec
     %3 = S_ADD_U32 %2, 1, implicit-def $scc
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
index 394df72a1c83..7bc14939624d 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
@@ -11,7 +11,7 @@ body:               |
     %1:sreg_32 = IMPLICIT_DEF
     %2:sreg_32 = IMPLICIT_DEF
     %3:sreg_32 = IMPLICIT_DEF
-    %4:vgpr_32 = V_CVT_U32_F32_e64 0, %0:vgpr_32, 0, 0, implicit $exec
+    %4:vgpr_32 = V_CVT_U32_F32_e64 0, %0:vgpr_32, 0, 0, implicit $mode, implicit $exec
     %5:sreg_32 = COPY %4:vgpr_32
     %6:sreg_32 = S_ADD_I32 %2:sreg_32, %5:sreg_32, implicit-def $scc
     %7:sreg_32 = S_ADDC_U32 %3:sreg_32, %1:sreg_32, implicit-def $scc, implicit $scc

diff  --git a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
index e76f1be6c485..b81556c94cce 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
@@ -111,7 +111,7 @@
 #  literal constant.
 
 # CHECK-LABEL: name: add_f32_1.0_one_f16_use
-# CHECK: %13:vgpr_32 = V_ADD_F16_e32  1065353216, killed %11, implicit $exec
+# CHECK: %13:vgpr_32 = V_ADD_F16_e32  1065353216, killed %11, implicit $mode, implicit $exec
 
 name:            add_f32_1.0_one_f16_use
 alignment:       1
@@ -160,7 +160,7 @@ body:             |
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
     %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %12 = V_MOV_B32_e32 1065353216, implicit $exec
-    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $exec
+    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM 0
 
@@ -171,8 +171,8 @@ body:             |
 
 # CHECK-LABEL: name: add_f32_1.0_multi_f16_use
 # CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit $mode, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit $mode, implicit $exec
 
 
 name:            add_f32_1.0_multi_f16_use
@@ -225,8 +225,8 @@ body:             |
     %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %13 = V_MOV_B32_e32 1065353216, implicit $exec
-    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
-    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
+    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $mode, implicit $exec
+    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM 0
@@ -238,8 +238,8 @@ body:             |
 #  immediate, and folded into the single f16 use as a literal constant
 
 # CHECK-LABEL: name: add_f32_1.0_one_f32_use_one_f16_use
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit $exec
-# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit $mode, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $mode, implicit $exec
 
 name:            add_f32_1.0_one_f32_use_one_f16_use
 alignment:       1
@@ -293,8 +293,8 @@ body:             |
     %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %14 = V_MOV_B32_e32 1065353216, implicit $exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
-    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec
+    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM 0
@@ -307,9 +307,9 @@ body:             |
 
 # CHECK-LABEL: name: add_f32_1.0_one_f32_use_multi_f16_use
 # CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32  %11, %14, implicit $exec
-# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12,  %14, implicit $exec
-# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32  %11, %14, implicit $mode, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12,  %14, implicit $mode, implicit $exec
+# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $mode, implicit $exec
 
 name:            add_f32_1.0_one_f32_use_multi_f16_use
 alignment:       1
@@ -364,9 +364,9 @@ body:             |
     %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %14 = V_MOV_B32_e32 1065353216, implicit $exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
-    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
-    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec
+    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $mode, implicit $exec
+    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
@@ -376,8 +376,8 @@ body:             |
 ---
 # CHECK-LABEL: name: add_i32_1_multi_f16_use
 # CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit $mode, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit $mode, implicit $exec
 
 
 name:            add_i32_1_multi_f16_use
@@ -430,8 +430,8 @@ body:             |
     %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %13 = V_MOV_B32_e32 1, implicit $exec
-    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
-    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
+    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $mode, implicit $exec
+    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM 0
@@ -441,9 +441,9 @@ body:             |
 
 # CHECK-LABEL: name: add_i32_m2_one_f32_use_multi_f16_use
 # CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit $exec
-# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit $exec
-# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit $mode, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit $mode, implicit $exec
+# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit $mode, implicit $exec
 
 name:            add_i32_m2_one_f32_use_multi_f16_use
 alignment:       1
@@ -498,9 +498,9 @@ body:             |
     %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %14 = V_MOV_B32_e32 -2, implicit $exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
-    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
-    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec
+    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $mode, implicit $exec
+    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
@@ -514,8 +514,8 @@ body:             |
 
 # CHECK-LABEL: name: add_f16_1.0_multi_f32_use
 # CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit $exec
-# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $mode, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit $mode, implicit $exec
 
 name:            add_f16_1.0_multi_f32_use
 alignment:       1
@@ -567,8 +567,8 @@ body:             |
     %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %13 = V_MOV_B32_e32 15360, implicit $exec
-    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
-    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec
+    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM 0
@@ -581,8 +581,8 @@ body:             |
 
 # CHECK-LABEL: name: add_f16_1.0_other_high_bits_multi_f16_use
 # CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit $exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit $mode, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $mode, implicit $exec
 
 name:            add_f16_1.0_other_high_bits_multi_f16_use
 alignment:       1
@@ -634,8 +634,8 @@ body:             |
     %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %13 = V_MOV_B32_e32 80886784, implicit $exec
-    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $exec
-    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec
+    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM 0
@@ -648,8 +648,8 @@ body:             |
 
 # CHECK-LABEL: name: add_f16_1.0_other_high_bits_use_f16_f32
 # CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit $exec
-# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $mode, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $mode, implicit $exec
 name:            add_f16_1.0_other_high_bits_use_f16_f32
 alignment:       1
 exposesReturnsTwice: false
@@ -700,8 +700,8 @@ body:             |
     %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
     %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
     %13 = V_MOV_B32_e32 305413120, implicit $exec
-    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
-    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec
+    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec
     BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM 0

diff  --git a/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir b/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
index e26f0c934fce..4eef4d647701 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
@@ -2,7 +2,7 @@
 ...
 # GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32
 # GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
-# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
+# GCN-NEXT: %24:vgpr_32 = nofpexcept V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $mode, implicit $exec
 
 name:            no_fold_imm_madak_mac_clamp_f32
 tracksRegLiveness: true
@@ -64,7 +64,7 @@ body:             |
     %22 = COPY %29
     %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     %23 = V_MOV_B32_e32 1090519040, implicit $exec
-    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
+    %24 = nofpexcept V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $mode, implicit $exec
     %26 = COPY %29
     BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
@@ -73,7 +73,7 @@ body:             |
 ---
 # GCN-LABEL: name: no_fold_imm_madak_mac_omod_f32
 # GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
-# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec
+# GCN: %24:vgpr_32 = nofpexcept V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $mode, implicit $exec
 
 name:            no_fold_imm_madak_mac_omod_f32
 tracksRegLiveness: true
@@ -135,7 +135,7 @@ body:             |
     %22 = COPY %29
     %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     %23 = V_MOV_B32_e32 1090519040, implicit $exec
-    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec
+    %24 = nofpexcept V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $mode, implicit $exec
     %26 = COPY %29
     BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
@@ -144,7 +144,7 @@ body:             |
 ---
 # GCN: name: no_fold_imm_madak_mad_clamp_f32
 # GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
-# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
+# GCN: %24:vgpr_32 = nofpexcept V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $mode, implicit $exec
 
 name:            no_fold_imm_madak_mad_clamp_f32
 tracksRegLiveness: true
@@ -206,7 +206,7 @@ body:             |
     %22 = COPY %29
     %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     %23 = V_MOV_B32_e32 1090519040, implicit $exec
-    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
+    %24 = nofpexcept V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $mode, implicit $exec
     %26 = COPY %29
     BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0
@@ -215,7 +215,7 @@ body:             |
 ---
 # GCN: name: no_fold_imm_madak_mad_omod_f32
 # GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
-# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec
+# GCN: %24:vgpr_32 = nofpexcept V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $mode, implicit $exec
 
 name:            no_fold_imm_madak_mad_omod_f32
 tracksRegLiveness: true
@@ -277,7 +277,7 @@ body:             |
     %22 = COPY %29
     %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     %23 = V_MOV_B32_e32 1090519040, implicit $exec
-    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec
+    %24 = nofpexcept V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $mode, implicit $exec
     %26 = COPY %29
     BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM 0

diff  --git a/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir
index 6f0e6e39eea8..1b87ef241cac 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-vgpr-copy.mir
@@ -39,7 +39,7 @@ body:             |
 
 # GCN-LABEL: name: fma_sgpr_use
 # GCN:      %0:sreg_64_xexec = IMPLICIT_DEF
-# GCN-NEXT: %4:vgpr_32 = nnan ninf nsz arcp contract afn reassoc V_FMA_F32 2, %0.sub0, 0, 1073741824, 0, %0.sub1, 0, 0, implicit $exec
+# GCN-NEXT: %4:vgpr_32 = nnan ninf nsz arcp contract afn reassoc V_FMA_F32 2, %0.sub0, 0, 1073741824, 0, %0.sub1, 0, 0, implicit $mode, implicit $exec
 ---
 name:            fma_sgpr_use
 body:             |
@@ -48,6 +48,6 @@ body:             |
     %1:sgpr_32 = COPY %0.sub0
     %2:sgpr_32 = COPY %0.sub1
     %3:vgpr_32 = COPY %2
-    %4:vgpr_32 = nnan ninf nsz arcp contract afn reassoc V_FMAC_F32_e64 2, %1, 0, 1073741824, 0, %3, 0, 0, implicit $exec
+    %4:vgpr_32 = nnan ninf nsz arcp contract afn reassoc V_FMAC_F32_e64 2, %1, 0, 1073741824, 0, %3, 0, 0, implicit $mode, implicit $exec
     DS_WRITE2_B32_gfx9 undef %5:vgpr_32, killed %4, undef %6:vgpr_32, 0, 1, 0, implicit $exec
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir b/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
index f1b5ee3524d9..5f4e6830eb44 100644
--- a/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
+++ b/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir
@@ -11,7 +11,7 @@ name:            flat_atomic_fcmpswap_to_s_denorm_mode
 body:            |
   bb.0:
     FLAT_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fcmpswap_x2_to_s_denorm_mode
@@ -25,7 +25,7 @@ name:            flat_atomic_fcmpswap_x2_to_s_denorm_mode
 body:            |
   bb.0:
     FLAT_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmax_to_s_denorm_mode
@@ -39,7 +39,7 @@ name:            flat_atomic_fmax_to_s_denorm_mode
 body:            |
   bb.0:
     FLAT_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmax_x2_to_s_denorm_mode
@@ -53,7 +53,7 @@ name:            flat_atomic_fmax_x2_to_s_denorm_mode
 body:            |
   bb.0:
     FLAT_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmin_to_s_denorm_mode
@@ -67,7 +67,7 @@ name:            flat_atomic_fmin_to_s_denorm_mode
 body:            |
   bb.0:
     FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmin_x2_to_s_denorm_mode
@@ -81,7 +81,7 @@ name:            flat_atomic_fmin_x2_to_s_denorm_mode
 body:            |
   bb.0:
     FLAT_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fcmpswap_x2_rtn_to_s_denorm_mode
@@ -95,7 +95,7 @@ name:            flat_atomic_fcmpswap_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = FLAT_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmax_rtn_to_s_denorm_mode
@@ -109,7 +109,7 @@ name:            flat_atomic_fmax_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = FLAT_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmax_x2_rtn_to_s_denorm_mode
@@ -123,7 +123,7 @@ name:            flat_atomic_fmax_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = FLAT_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmin_rtn_to_s_denorm_mode
@@ -137,7 +137,7 @@ name:            flat_atomic_fmin_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = FLAT_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fmin_x2_rtn_to_s_denorm_mode
@@ -151,7 +151,7 @@ name:            flat_atomic_fmin_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = FLAT_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_atomic_fcmpswap_rtn_to_s_denorm_mode
@@ -165,7 +165,7 @@ name:            flat_atomic_fcmpswap_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = FLAT_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fcmpswap_to_s_denorm_mode
@@ -179,7 +179,7 @@ name:            global_atomic_fcmpswap_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fcmpswap_x2_to_s_denorm_mode
@@ -193,7 +193,7 @@ name:            global_atomic_fcmpswap_x2_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmax_to_s_denorm_mode
@@ -207,7 +207,7 @@ name:            global_atomic_fmax_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmax_x2_to_s_denorm_mode
@@ -221,7 +221,7 @@ name:            global_atomic_fmax_x2_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmin_to_s_denorm_mode
@@ -235,7 +235,7 @@ name:            global_atomic_fmin_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmin_x2_to_s_denorm_mode
@@ -249,7 +249,7 @@ name:            global_atomic_fmin_x2_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fcmpswap_rtn_to_s_denorm_mode
@@ -263,7 +263,7 @@ name:            global_atomic_fcmpswap_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = GLOBAL_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fcmpswap_x2_rtn_to_s_denorm_mode
@@ -277,7 +277,7 @@ name:            global_atomic_fcmpswap_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmax_rtn_to_s_denorm_mode
@@ -291,7 +291,7 @@ name:            global_atomic_fmax_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmax_x2_rtn_to_s_denorm_mode
@@ -305,7 +305,7 @@ name:            global_atomic_fmax_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmin_rtn_to_s_denorm_mode
@@ -319,7 +319,7 @@ name:            global_atomic_fmin_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmin_x2_rtn_to_s_denorm_mode
@@ -333,7 +333,7 @@ name:            global_atomic_fmin_x2_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fcmpswap_saddr_to_s_denorm_mode
@@ -347,7 +347,7 @@ name:            global_atomic_fcmpswap_saddr_to_s_denorm_mode
 body:            |
   bb.0:
     GLOBAL_ATOMIC_FCMPSWAP_SADDR undef %0:vreg_64, undef %1:vgpr_32, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fcmpswap_x2_saddr_rtn_to_s_denorm_mode
@@ -361,7 +361,7 @@ name:            global_atomic_fcmpswap_x2_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_SADDR_RTN undef %0:vreg_64, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmax_saddr_rtn_to_s_denorm_mode
@@ -375,7 +375,7 @@ name:            global_atomic_fmax_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_SADDR_RTN undef %0:vreg_64, undef %1:vgpr_32, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmax_x2_saddr_rtn_to_s_denorm_mode
@@ -389,7 +389,7 @@ name:            global_atomic_fmax_x2_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_SADDR_RTN undef %0:vreg_64, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmin_saddr_rtn_to_s_denorm_mode
@@ -403,7 +403,7 @@ name:            global_atomic_fmin_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_SADDR_RTN undef %0:vreg_64, undef %1:vgpr_32, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: global_atomic_fmin_x2_saddr_rtn_to_s_denorm_mode
@@ -417,7 +417,7 @@ name:            global_atomic_fmin_x2_saddr_rtn_to_s_denorm_mode
 body:            |
   bb.0:
     %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_SADDR_RTN undef %0:vreg_64, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_fp_atomic_to_s_denorm_mode_waitcnt
@@ -430,7 +430,7 @@ body:            |
   bb.0:
     FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
     S_WAITCNT 0
-    S_DENORM_MODE 0
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
 
 # GCN-LABEL: name: flat_fp_atomic_to_s_denorm_mode_valu
@@ -442,6 +442,6 @@ name:            flat_fp_atomic_to_s_denorm_mode_valu
 body:            |
   bb.0:
     FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst 4 on `float addrspace(1)* undef`)
-    %2:vgpr_32 = V_ADD_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, implicit $exec
-    S_DENORM_MODE 0
+    %2:vgpr_32 = V_ADD_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, implicit $mode, implicit $exec
+    S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/hazard-buffer-store-v-interp.mir b/llvm/test/CodeGen/AMDGPU/hazard-buffer-store-v-interp.mir
index bd6244127e6f..a8c82e6cf254 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard-buffer-store-v-interp.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard-buffer-store-v-interp.mir
@@ -11,9 +11,9 @@ name:            hazard_buffer_store_v_interp
 body:             |
   bb.0.entry:
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr7, $vgpr8, $vgpr9, $vgpr10
-  
+
     BUFFER_STORE_DWORDX4_OFFSET_exact killed $vgpr7_vgpr8_vgpr9_vgpr10, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 96, 0, 0, 0, 0, 0, implicit $exec
-    $vgpr7 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $m0, implicit $exec
+    $vgpr7 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_ENDPGM 0
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/hazard-hidden-bundle.mir b/llvm/test/CodeGen/AMDGPU/hazard-hidden-bundle.mir
index d0f32f287473..830e9aa340fd 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard-hidden-bundle.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard-hidden-bundle.mir
@@ -32,7 +32,7 @@ body:	|
     S_SENDMSG 3, implicit $exec, implicit $m0
     $m0 = S_MOV_B32 $sgpr8
     BUNDLE implicit-def $vgpr0 {
-      $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+      $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $mode, implicit $m0, implicit $exec
     }
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/hazard-in-bundle.mir b/llvm/test/CodeGen/AMDGPU/hazard-in-bundle.mir
index c88be5fdaba3..8d02f7a60add 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard-in-bundle.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard-in-bundle.mir
@@ -38,7 +38,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     BUNDLE implicit-def $sgpr0_sgpr1 {
       $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
-      $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+      $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     }
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/hazard-kill.mir b/llvm/test/CodeGen/AMDGPU/hazard-kill.mir
index 5f4b55132112..6602c079986e 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard-kill.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard-kill.mir
@@ -19,12 +19,12 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     liveins: $sgpr2, $sgpr3, $sgpr4
-  
+
     $sgpr6 = S_MOV_B32 killed $sgpr3
     renamable $sgpr8_sgpr9_sgpr10_sgpr11 = S_LOAD_DWORDX4_IMM renamable $sgpr6_sgpr7, 16, 0, 0
     $m0 = S_MOV_B32 killed renamable $sgpr4
     dead renamable $sgpr0 = KILL undef renamable $sgpr2
-    renamable $vgpr0 = V_INTERP_MOV_F32 2, 0, 0, implicit $m0, implicit $exec
+    renamable $vgpr0 = V_INTERP_MOV_F32 2, 0, 0, implicit $mode, implicit $m0, implicit $exec
     renamable $sgpr0 = S_MOV_B32 0
 
     S_ENDPGM 0

diff  --git a/llvm/test/CodeGen/AMDGPU/hazard.mir b/llvm/test/CodeGen/AMDGPU/hazard.mir
index bc62bd9ef087..1b53aac3646b 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard.mir
@@ -27,7 +27,7 @@ body:             |
 
     $m0 = S_MOV_B32 killed $sgpr7
     $vgpr5 = IMPLICIT_DEF
-    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $mode, implicit $m0, implicit $exec
     SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0
 
 ...
@@ -56,7 +56,7 @@ body:             |
 
     $m0 = S_MOV_B32 killed $sgpr7
     INLINEASM &"; no-op", 1, 327690, def $vgpr5
-    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $mode, implicit $m0, implicit $exec
     SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0
 ...
 

diff  --git a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-callee.mir b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-callee.mir
index b9f6c2f79db0..0ffed0ae4bfd 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-callee.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-callee.mir
@@ -19,7 +19,7 @@ liveins:
 name: entry_callee_wait
 body:             |
   bb.0:
-    $vgpr0 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $exec
+    $vgpr0 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $mode, implicit $exec
     S_SETPC_B64 killed $sgpr0_sgpr1
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir b/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
index c8778c73aea3..a8c930d27c9b 100644
--- a/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
+++ b/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
@@ -79,22 +79,22 @@ name: div_fmas
 body: |
   bb.0:
     $vcc = S_MOV_B64 0
-    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $mode, implicit $vcc, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
     implicit $vcc = V_CMP_EQ_I32_e32 $vgpr1, $vgpr2, implicit $exec
-    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $mode, implicit $vcc, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
     $vcc = V_CMP_EQ_I32_e64 $vgpr1, $vgpr2, implicit $exec
-    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $mode, implicit $vcc, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
-    $vgpr4, $vcc = V_DIV_SCALE_F32 $vgpr1, $vgpr1, $vgpr3, implicit $exec
-    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
+    $vgpr4, $vcc = V_DIV_SCALE_F32 $vgpr1, $vgpr1, $vgpr3, implicit $mode, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $mode, implicit $vcc, implicit $exec
     S_ENDPGM 0
 
 ...
@@ -128,24 +128,24 @@ name: s_getreg
 
 body: |
   bb.0:
-    S_SETREG_B32 $sgpr0, 1
-    $sgpr1 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 1, implicit-def $mode, implicit $mode
+    $sgpr1 = S_GETREG_B32 1, implicit-def $mode, implicit $mode
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_IMM32_B32 0, 1
-    $sgpr1 = S_GETREG_B32 1
+    S_SETREG_IMM32_B32 0, 1, implicit-def $mode, implicit $mode
+    $sgpr1 = S_GETREG_B32 1, implicit-def $mode, implicit $mode
     S_BRANCH %bb.2
 
   bb.2:
-    S_SETREG_B32 $sgpr0, 1
+    S_SETREG_B32 $sgpr0, 1, implicit-def $mode, implicit $mode
     $sgpr1 = S_MOV_B32 0
-    $sgpr2 = S_GETREG_B32 1
+    $sgpr2 = S_GETREG_B32 1, implicit-def $mode, implicit $mode
     S_BRANCH %bb.3
 
   bb.3:
-    S_SETREG_B32 $sgpr0, 0
-    $sgpr1 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 0, implicit-def $mode, implicit $mode
+    $sgpr1 = S_GETREG_B32 1, implicit-def $mode, implicit $mode
     S_ENDPGM 0
 ...
 
@@ -173,18 +173,18 @@ name: s_setreg
 
 body: |
   bb.0:
-    S_SETREG_B32 $sgpr0, 1
-    S_SETREG_B32 $sgpr1, 1
+    S_SETREG_B32 $sgpr0, 1, implicit-def $mode, implicit $mode
+    S_SETREG_B32 $sgpr1, 1, implicit-def $mode, implicit $mode
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_B32 $sgpr0, 64
-    S_SETREG_B32 $sgpr1, 128
+    S_SETREG_B32 $sgpr0, 64, implicit-def $mode, implicit $mode
+    S_SETREG_B32 $sgpr1, 128, implicit-def $mode, implicit $mode
     S_BRANCH %bb.2
 
   bb.2:
-    S_SETREG_B32 $sgpr0, 1
-    S_SETREG_B32 $sgpr1, 0
+    S_SETREG_B32 $sgpr0, 1, implicit-def $mode, implicit $mode
+    S_SETREG_B32 $sgpr1, 0, implicit-def $mode, implicit $mode
     S_ENDPGM 0
 ...
 
@@ -342,12 +342,12 @@ name: rfe
 
 body: |
   bb.0:
-    S_SETREG_B32 $sgpr0, 3
+    S_SETREG_B32 $sgpr0, 3, implicit-def $mode, implicit $mode
     S_RFE_B64 $sgpr2_sgpr3
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_B32 $sgpr0, 0
+    S_SETREG_B32 $sgpr0, 0, implicit-def $mode, implicit $mode
     S_RFE_B64 $sgpr2_sgpr3
     S_ENDPGM 0
 
@@ -461,22 +461,22 @@ name: v_interp
 body: |
   bb.0:
     $m0 = S_MOV_B32 0
-    $vgpr0 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
     $m0 = S_MOV_B32 0
-    $vgpr0 = V_INTERP_P2_F32 $vgpr0, $vgpr1, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P2_F32 $vgpr0, $vgpr1, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
     $m0 = S_MOV_B32 0
-    $vgpr0 = V_INTERP_P1_F32_16bank $vgpr0, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P1_F32_16bank $vgpr0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
     $m0 = S_MOV_B32 0
-    $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_ENDPGM 0
 ...
 

diff  --git a/llvm/test/CodeGen/AMDGPU/madak-inline-constant.mir b/llvm/test/CodeGen/AMDGPU/madak-inline-constant.mir
index 473c69e38395..935e91a3a864 100644
--- a/llvm/test/CodeGen/AMDGPU/madak-inline-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/madak-inline-constant.mir
@@ -4,7 +4,7 @@
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192
 # GCN:  S_MOV_B32 1082130432
-# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $exec
+# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src1-inlined
@@ -15,7 +15,7 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:sreg_32 = S_MOV_B32 1082130432
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed %18, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed %18, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
@@ -23,7 +23,7 @@ body:             |
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192
 # GCN:  S_MOV_B32 1082130432
-# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $exec
+# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src0-inlined
@@ -34,14 +34,14 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:sreg_32 = S_MOV_B32 1082130432
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %18, 0, killed %0, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %18, 0, killed %0, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192
 # GCN:  S_MOV_B32 1082130432
-# GCN:  %3:vgpr_32 = V_MADAK_F32 killed %0, killed %0, 1092616192, implicit $exec
+# GCN:  %3:vgpr_32 = V_MADAK_F32 killed %0, killed %0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test none-inlined
@@ -52,14 +52,14 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:sreg_32 = S_MOV_B32 1082130432
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed %0, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed %0, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192
 # GCN:  V_MOV_B32_e32 1082130432
-# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $exec
+# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src1-2vgprs-inlined
@@ -70,7 +70,7 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:vgpr_32 = V_MOV_B32_e32 1082130432, implicit $exec
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed %18, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed %18, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
@@ -78,7 +78,7 @@ body:             |
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192
 # GCN:  V_MOV_B32_e32 1082130432
-# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $exec
+# GCN:  %3:vgpr_32 = V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src0-2vgprs-inlined
@@ -89,14 +89,14 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:vgpr_32 = V_MOV_B32_e32 1082130432, implicit $exec
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %18, 0, killed %0, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %18, 0, killed %0, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192, implicit $exec
 # GCN:  S_MOV_B32 1082130432
-# GCN:  V_MADAK_F32 1082130432, killed $vgpr1, 1092616192, implicit $exec
+# GCN:  V_MADAK_F32 1082130432, killed $vgpr1, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src0-phys-vgpr
@@ -108,14 +108,14 @@ body:             |
     $vgpr1 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:sgpr_32 = S_MOV_B32 1082130432
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed $vgpr1, 0, killed %18, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed $vgpr1, 0, killed %18, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192, implicit $exec
 # GCN:  S_MOV_B32 1082130432
-# GCN:  V_MADAK_F32 1082130432, killed $vgpr0, 1092616192, implicit $exec
+# GCN:  V_MADAK_F32 1082130432, killed $vgpr0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src1-phys-vgpr
@@ -127,13 +127,13 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     %18:sgpr_32 = S_MOV_B32 1082130432
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %18, 0, killed $vgpr0, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %18, 0, killed $vgpr0, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192, implicit $exec
-# GCN:  V_MAC_F32_e64 0, killed $sgpr2, 0, killed %0, 0, %1, 0, 0, implicit $exec
+# GCN:  V_MAC_F32_e64 0, killed $sgpr2, 0, killed %0, 0, %1, 0, 0, implicit $mode, implicit $exec
 
 ---
 name:            test src0-phys-sgpr
@@ -144,13 +144,13 @@ body:             |
 
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed $sgpr2, 0, killed %0, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed $sgpr2, 0, killed %0, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192, implicit $exec
-# GCN:  V_MAC_F32_e64 0, killed %0, 0, killed $sgpr2, 0, %1, 0, 0, implicit $exec
+# GCN:  V_MAC_F32_e64 0, killed %0, 0, killed $sgpr2, 0, %1, 0, 0, implicit $mode, implicit $exec
 
 ---
 name:            test src1-phys-sgpr
@@ -161,14 +161,14 @@ body:             |
 
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed $sgpr2, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed $sgpr2, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: bb.0:
 # GCN:  V_MOV_B32_e32 1092616192, implicit $exec
 # GCN:  $sgpr2 = S_MOV_B32 1082130432
-# GCN:  V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $exec
+# GCN:  V_MADAK_F32 1082130432, killed %0, 1092616192, implicit $mode, implicit $exec
 
 ---
 name:            test src1-phys-sgpr-move
@@ -180,6 +180,6 @@ body:             |
     %0:vgpr_32 = COPY $vgpr0
     %17:vgpr_32 = V_MOV_B32_e32 1092616192, implicit $exec
     $sgpr2 = S_MOV_B32 1082130432
-    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed $sgpr2, 0, %17, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAC_F32_e64 0, killed %0, 0, killed $sgpr2, 0, %17, 0, 0, implicit $mode, implicit $exec
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir
index 9f49f9fd5852..59ce256dc012 100644
--- a/llvm/test/CodeGen/AMDGPU/mai-hazards.mir
+++ b/llvm/test/CodeGen/AMDGPU/mai-hazards.mir
@@ -11,7 +11,7 @@ body:             |
   bb.0:
     $vgpr0 = V_MOV_B32_e32 1, implicit $exec
     $vgpr1 = V_MOV_B32_e32 1, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -34,8 +34,8 @@ body:             |
 name:            mfma_write_agpr_mfma_read_same_agpr
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -47,8 +47,8 @@ body:             |
 name:            mfma_write_agpr_mfma_read_overlap
 body:             |
   bb.0:
-    $agpr1_agpr2_agpr3_agpr4 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr1_agpr2_agpr3_agpr4 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -60,8 +60,8 @@ body:             |
 name:            mfma_write_agpr_mfma_read_partial
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -75,8 +75,8 @@ body:             |
 name:            mfma_write_agpr_mfma_srca_read_overlap
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $agpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $agpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -90,8 +90,8 @@ body:             |
 name:            mfma_write_agpr_mfma_srcb_read_overlap
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $agpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $agpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -105,7 +105,7 @@ body:             |
 name:            mfma_4x4_write_agpr_accvgpr_read
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
     $vgpr0 = V_ACCVGPR_READ_B32 killed $agpr0, implicit $exec
 ...
 ---
@@ -126,7 +126,7 @@ body:             |
 name:            mfma_16x16_write_agpr_accvgpr_read
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $vgpr0 = V_ACCVGPR_READ_B32 killed $agpr0, implicit $exec
 ...
 ---
@@ -155,7 +155,7 @@ body:             |
 name:            mfma_32x32_write_agpr_accvgpr_read
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $vgpr0 = V_ACCVGPR_READ_B32 killed $agpr0, implicit $exec
 ...
 ---
@@ -167,7 +167,7 @@ body:             |
 name:            mfma_4x4_write_agpr_accvgpr_write
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr0, implicit $exec
 ...
 ---
@@ -185,7 +185,7 @@ body:             |
 name:            mfma_16x16_write_agpr_accvgpr_write
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr0, implicit $exec
 ...
 ---
@@ -211,7 +211,7 @@ body:             |
 name:            mfma_32x32_write_agpr_accvgpr_write
 body:             |
   bb.0:
-    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X2F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr0, implicit $exec
 ...
 ---
@@ -222,7 +222,7 @@ body:             |
 name:            mfma_4x4_read_srcc_accvgpr_write
 body:             |
   bb.0:
-    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr2, implicit $exec
 ...
 ---
@@ -238,7 +238,7 @@ body:             |
 name:            mfma_16x16_read_srcc_accvgpr_write
 body:             |
   bb.0:
-    $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
+    $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 = V_MFMA_F32_16X16X1F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr2, implicit $exec
 ...
 ---
@@ -262,7 +262,7 @@ body:             |
 name:            mfma_32x32_read_srcc_accvgpr_write
 body:             |
   bb.0:
-    $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 = V_MFMA_F32_32X32X2F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $exec
+    $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 = V_MFMA_F32_32X32X2F32 killed $vgpr1, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr2, implicit $exec
 ...
 ---
@@ -274,7 +274,7 @@ name:            accvgpr_read_write_vgpr_valu_read
 body:             |
   bb.0:
     $vgpr0 = V_ACCVGPR_READ_B32 killed $agpr4, implicit $exec
-    $vgpr1 = V_ADD_F32_e32 0, killed $vgpr0, implicit $exec
+    $vgpr1 = V_ADD_F32_e32 0, killed $vgpr0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -287,7 +287,7 @@ name:            accvgpr_read_write_vgpr_mfma_read
 body:             |
   bb.0:
     $vgpr0 = V_ACCVGPR_READ_B32 killed $agpr4, implicit $exec
-    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr0, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3 = V_MFMA_F32_4X4X1F32 killed $vgpr0, killed $vgpr0, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -312,7 +312,7 @@ name:            accvgpr_write_agpr_mfma_read_srcc
 body:             |
   bb.0:
     $agpr0 = V_ACCVGPR_WRITE_B32 killed $vgpr0, implicit $exec
-    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr2, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $vgpr2, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -326,7 +326,7 @@ name:            accvgpr_write_agpr_mfma_read_srca
 body:             |
   bb.0:
     $agpr8 = V_ACCVGPR_WRITE_B32 killed $vgpr0, implicit $exec
-    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $agpr8, killed $vgpr1, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $agpr8, killed $vgpr1, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -340,7 +340,7 @@ name:            accvgpr_write_agpr_mfma_read_srcb
 body:             |
   bb.0:
     $agpr8 = V_ACCVGPR_WRITE_B32 killed $vgpr0, implicit $exec
-    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $agpr8, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $vgpr1, killed $agpr8, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 
@@ -369,7 +369,7 @@ name:            vcmpx_write_exec_mfma
 body:             |
   bb.0:
     implicit $exec, implicit $vcc = V_CMPX_EQ_I32_e32 $vgpr0, $vgpr1, implicit $exec
-    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $agpr8, killed $vgpr1, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $exec
+    $agpr4_agpr5_agpr6_agpr7 = V_MFMA_F32_4X4X1F32 killed $agpr8, killed $vgpr1, killed $agpr0_agpr1_agpr2_agpr3, 0, 0, 0, implicit $mode, implicit $exec
 ...
 ---
 

diff  --git a/llvm/test/CodeGen/AMDGPU/merge-m0.mir b/llvm/test/CodeGen/AMDGPU/merge-m0.mir
index 9c6ff0b0a628..0afc5d1cb1a1 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-m0.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-m0.mir
@@ -291,7 +291,7 @@ body:             |
   bb.0:
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
-    S_SETREG_IMM32_B32 0, 1
+    S_SETREG_IMM32_B32 0, 1, implicit-def $mode, implicit $mode
     SI_INIT_M0 -1, implicit-def $m0
     DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/mode-register.mir b/llvm/test/CodeGen/AMDGPU/mode-register.mir
index a6324410b488..753e6a3ce0a7 100644
--- a/llvm/test/CodeGen/AMDGPU/mode-register.mir
+++ b/llvm/test/CodeGen/AMDGPU/mode-register.mir
@@ -17,12 +17,12 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2
     $m0 = S_MOV_B32 killed $sgpr2
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
-    $vgpr2 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $exec
-    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $m0, implicit $exec
-    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_ADD_F16_e32 killed $vgpr1, killed $vgpr0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr1, implicit $exec
+    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_ADD_F16_e32 killed $vgpr1, killed $vgpr0, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -41,14 +41,14 @@ body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $sgpr2
     $m0 = S_MOV_B32 killed $sgpr2
-    S_SETREG_IMM32_B32 3, 2177
+    S_SETREG_IMM32_B32 3, 2177, implicit-def $mode, implicit $mode
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
-    $vgpr2 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $exec
-    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $m0, implicit $exec
-    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_ADD_F16_e32 killed $vgpr1, killed $vgpr0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr1, implicit $exec
+    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_ADD_F16_e32 killed $vgpr1, killed $vgpr0, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -68,13 +68,13 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2
     $m0 = S_MOV_B32 killed $sgpr2
     $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
-    $vgpr2 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $exec
-    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $m0, implicit $exec
-    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $m0, implicit $exec
-    S_SETREG_IMM32_B32 0, 2177
-    $vgpr0 = V_ADD_F16_e32 killed $vgpr1, killed $vgpr0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr1, implicit $exec
+    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $mode, implicit $m0, implicit $exec
+    S_SETREG_IMM32_B32 0, 2177, implicit-def $mode, implicit $mode
+    $vgpr0 = V_ADD_F16_e32 killed $vgpr1, killed $vgpr0, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -89,7 +89,7 @@ name: rtn_default
 body: |
   bb.0:
     liveins: $vgpr1_vgpr2
-    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $exec
+    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -106,8 +106,8 @@ name: rtn_from_rtz
 body: |
   bb.0:
     liveins: $vgpr1_vgpr2
-    S_SETREG_IMM32_B32 3, 2177
-    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $exec
+    S_SETREG_IMM32_B32 3, 2177, implicit-def $mode, implicit $mode
+    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -122,11 +122,11 @@ body: |
   bb.0:
     successors: %bb.1
     liveins: $vgpr1_vgpr2
-    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $exec
+    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $mode, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -150,13 +150,13 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr3, $vgpr4
     $m0 = S_MOV_B32 killed $sgpr2
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     $vgpr2 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $exec
-    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $m0, implicit $exec
-    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $m0, implicit $exec
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
-    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_ADD_F16_e32 killed $sgpr0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
+    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_ADD_F16_e32 killed $sgpr0, killed $vgpr0, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -179,14 +179,14 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr3, $vgpr4
     $m0 = S_MOV_B32 killed $sgpr2
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
-    S_SETREG_IMM32_B32 2, 2049
-    $vgpr2 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $exec
-    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $m0, implicit $exec
-    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $m0, implicit $exec
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
-    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $m0, implicit $exec
-    $vgpr0 = V_ADD_F16_e32 killed $sgpr0, killed $vgpr0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    S_SETREG_IMM32_B32 2, 2049, implicit-def $mode, implicit $mode
+    $vgpr2 = V_MOV_B32_e32 $sgpr1, implicit $exec
+    $vgpr0 = V_INTERP_P1LL_F16 0, killed $vgpr0, 2, 1, -1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P2_F16 0, $vgpr2, 2, 1, 0, killed $vgpr1, 0, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
+    $vgpr0 = V_INTERP_P2_F16 0, killed $vgpr2, 2, 1, 0, killed $vgpr0, -1, 0, implicit $mode, implicit $m0, implicit $exec
+    $vgpr0 = V_ADD_F16_e32 killed $sgpr0, killed $vgpr0, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -212,13 +212,13 @@ body: |
 
   bb.1:
     successors: %bb.2
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
     successors: %bb.1, %bb.3
-    $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_CBRANCH_VCCZ %bb.1, implicit $vcc
     S_BRANCH %bb.3
 
@@ -251,7 +251,7 @@ body: |
 
   bb.2:
     successors: %bb.1, %bb.3
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
     S_CBRANCH_VCCZ %bb.1, implicit $vcc
     S_BRANCH %bb.3
 
@@ -267,7 +267,7 @@ body: |
 
   bb.5:
     successors: %bb.1, %bb.6
-    S_SETREG_IMM32_B32 3, 2177
+    S_SETREG_IMM32_B32 3, 2177, implicit-def $mode, implicit $mode
     S_CBRANCH_VCCZ %bb.1, implicit $vcc
     S_BRANCH %bb.6
 
@@ -306,7 +306,7 @@ body: |
   bb.3:
     successors: %bb.1, %bb.4
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_CBRANCH_VCCZ %bb.1, implicit $vcc
     S_BRANCH %bb.4
 
@@ -337,12 +337,12 @@ body: |
 
   bb.2:
     successors: %bb.3
-    S_SETREG_IMM32_B32 3, 2177
+    S_SETREG_IMM32_B32 3, 2177, implicit-def $mode, implicit $mode
     S_BRANCH %bb.3
 
   bb.3:
     successors: %bb.4
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
     S_BRANCH %bb.4
 
   bb.4:
@@ -373,7 +373,7 @@ body: |
 
   bb.2:
     successors: %bb.3
-    S_SETREG_IMM32_B32 3, 2177
+    S_SETREG_IMM32_B32 3, 2177, implicit-def $mode, implicit $mode
     S_BRANCH %bb.3
 
   bb.3:
@@ -383,7 +383,7 @@ body: |
 
   bb.4:
     successors: %bb.5
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
     S_BRANCH %bb.5
 
   bb.5:
@@ -402,8 +402,8 @@ body: |
   bb.0:
     successors: %bb.1
     liveins: $vgpr1_vgpr2
-    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $exec
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr1_vgpr2 = V_FRACT_F64_e32 killed $vgpr1_vgpr2, implicit $mode, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
@@ -419,7 +419,7 @@ body: |
     S_BRANCH %bb.4
 
   bb.4:
-    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $m0, implicit $exec
+    $vgpr1 = V_INTERP_P1LL_F16 0, $vgpr0, 2, 1, 0, 0, 0, implicit $mode, implicit $m0, implicit $exec
     S_ENDPGM 0
 ...
 ---
@@ -446,12 +446,12 @@ body: |
 
   bb.2:
     successors: %bb.3
-    S_SETREG_IMM32_B32 3, 2177
+    S_SETREG_IMM32_B32 3, 2177, implicit-def $mode, implicit $mode
     S_BRANCH %bb.3
 
   bb.3:
     successors: %bb.4
-    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $exec
+    $vgpr3_vgpr4 = V_FRACT_F64_e32 killed $vgpr3_vgpr4, implicit $mode, implicit $exec
     S_BRANCH %bb.4
 
   bb.4:

diff  --git a/llvm/test/CodeGen/AMDGPU/movrels-bug.mir b/llvm/test/CodeGen/AMDGPU/movrels-bug.mir
index c5575b2b7387..6d2b9ab4422a 100644
--- a/llvm/test/CodeGen/AMDGPU/movrels-bug.mir
+++ b/llvm/test/CodeGen/AMDGPU/movrels-bug.mir
@@ -24,7 +24,7 @@ body:             |
     V_MOVRELD_B32_e32 undef $vgpr2, 0, implicit $m0, implicit $exec, implicit-def $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
     $m0 = S_MOV_B32 undef $sgpr0
     $vgpr1 = V_MOVRELS_B32_e32 undef $vgpr1, implicit $m0, implicit $exec, implicit killed $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-    $vgpr4 = V_MAC_F32_e32 undef $vgpr0, undef $vgpr0, undef $vgpr4, implicit $exec
+    $vgpr4 = nofpexcept V_MAC_F32_e32 undef $vgpr0, undef $vgpr0, undef $vgpr4, implicit $mode, implicit $exec
     EXP_DONE 15, undef $vgpr0, killed $vgpr1, killed $vgpr4, undef $vgpr0, 0, 0, 12, implicit $exec
     S_ENDPGM 0
 

diff  --git a/llvm/test/CodeGen/AMDGPU/omod-nsz-flag.mir b/llvm/test/CodeGen/AMDGPU/omod-nsz-flag.mir
index 01d95ad4c70d..f8a140d73206 100644
--- a/llvm/test/CodeGen/AMDGPU/omod-nsz-flag.mir
+++ b/llvm/test/CodeGen/AMDGPU/omod-nsz-flag.mir
@@ -4,8 +4,8 @@
 
 # FIXME: Is it OK to fold omod for this?
 # GCN-LABEL: name: omod_inst_flag_nsz_src
-# GCN: %0:vgpr_32 = nsz V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $exec
-# GCN-NEXT: %1:vgpr_32 = V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $exec
+# GCN: %0:vgpr_32 = nsz nofpexcept V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $mode, implicit $exec
+# GCN-NEXT: %1:vgpr_32 = nofpexcept V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $mode, implicit $exec
 # GCN-NEXT: S_ENDPGM 0, implicit %1
 name: omod_inst_flag_nsz_src
 tracksRegLiveness: true
@@ -18,15 +18,15 @@ body:             |
   bb.0:
     liveins: $vgpr0, $vgpr1
 
-  %0:vgpr_32 = nsz V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $exec
-  %1:vgpr_32 = V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $exec
+  %0:vgpr_32 = nsz nofpexcept V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $mode, implicit $exec
+  %1:vgpr_32 = nofpexcept V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $mode, implicit $exec
   S_ENDPGM 0, implicit %1
 
 ...
 ---
 
 # GCN-LABEL: name: omod_inst_flag_nsz_result
-# GCN: %0:vgpr_32 = V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 1, implicit $exec
+# GCN: %0:vgpr_32 = nofpexcept V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 1, implicit $mode, implicit $exec
 # GCN-NEXT: S_ENDPGM 0, implicit %0
 
 name: omod_inst_flag_nsz_result
@@ -40,15 +40,15 @@ body:             |
   bb.0:
     liveins: $vgpr0, $vgpr1
 
-  %0:vgpr_32 = V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $exec
-  %1:vgpr_32 = nsz V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $exec
+  %0:vgpr_32 = nofpexcept V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $mode, implicit $exec
+  %1:vgpr_32 = nsz nofpexcept V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $mode, implicit $exec
   S_ENDPGM 0, implicit %1
 ...
 
 ---
 
 # GCN-LABEL: name: omod_inst_flag_nsz_both
-# GCN: %0:vgpr_32 = nsz V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 1, implicit $exec
+# GCN: %0:vgpr_32 = nsz nofpexcept V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 1, implicit $mode, implicit $exec
 # GCN-NEXT: S_ENDPGM 0, implicit %0
 
 name: omod_inst_flag_nsz_both
@@ -62,7 +62,7 @@ body:             |
   bb.0:
     liveins: $vgpr0, $vgpr1
 
-  %0:vgpr_32 = nsz V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $exec
-  %1:vgpr_32 = nsz V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $exec
+  %0:vgpr_32 = nsz nofpexcept V_ADD_F32_e64 0, $vgpr0, 0, $vgpr1, 0, 0, implicit $mode, implicit $exec
+  %1:vgpr_32 = nsz nofpexcept V_MUL_F32_e64 0, %0, 0, 1073741824, 0, 0, implicit $mode, implicit $exec
   S_ENDPGM 0, implicit %1
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/power-sched-no-instr-sunit.mir b/llvm/test/CodeGen/AMDGPU/power-sched-no-instr-sunit.mir
index 66bd4c163c66..837389d6aa7a 100644
--- a/llvm/test/CodeGen/AMDGPU/power-sched-no-instr-sunit.mir
+++ b/llvm/test/CodeGen/AMDGPU/power-sched-no-instr-sunit.mir
@@ -15,7 +15,7 @@ body:             |
     $sgpr10_sgpr11 = S_MOV_B64 $sgpr2_sgpr3, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr0_sgpr1_sgpr2_sgpr3
     $sgpr8_sgpr9 = S_MOV_B64 $sgpr0_sgpr1, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3
     S_BARRIER
-    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15_agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 = V_MFMA_F32_32X32X1F32 undef $vgpr0, undef $vgpr0, 0, 0, 0, 2, implicit $exec
+    $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15_agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31 = V_MFMA_F32_32X32X1F32 undef $vgpr0, undef $vgpr0, 0, 0, 0, 2, implicit $mode, implicit $exec
     $vgpr0 = V_ACCVGPR_READ_B32 $agpr31, implicit $exec
     BUFFER_STORE_DWORD_OFFEN killed $vgpr0, undef $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr6, 0, 0, 0, 0, 0, 0, implicit $exec
 

diff  --git a/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join-seg.mir b/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join-seg.mir
index 9b1bb7f2fb7e..e4e33026da4b 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join-seg.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join-seg.mir
@@ -188,7 +188,7 @@ body:             |
     %45 = BUFFER_LOAD_DWORD_OFFEN killed %13, undef %15, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable invariant load 4)
     %46 = V_AND_B32_e32 1, killed %45, implicit $exec
     %21 = S_BUFFER_LOAD_DWORD_SGPR undef %22, undef %23, 0, 0 :: (dereferenceable invariant load 4)
-    %25 = V_CMP_GE_F32_e64 0, 0, 0, killed %21, 0, implicit $exec
+    %25 = nofpexcept V_CMP_GE_F32_e64 0, 0, 0, killed %21, 0, implicit $mode, implicit $exec
     %26 = V_CNDMASK_B32_e64 0, 0, 0, -1, killed %25, implicit $exec
     %62 = IMPLICIT_DEF
 
@@ -211,13 +211,13 @@ body:             |
     S_BRANCH %bb.31
 
   bb.30:
-    %33 = V_MAD_F32 1, killed %53.sub0, 0, undef %34, 0, 0, 0, 0, implicit $exec
-    %35 = V_MAC_F32_e32 killed %33, undef %36, undef %35, implicit $exec
-    %38 = V_MAX_F32_e32 0, killed %35, implicit $exec
-    %39 = V_LOG_F32_e32 killed %38, implicit $exec
-    %40 = V_MUL_F32_e32 killed %39, undef %41, implicit $exec
-    %42 = V_EXP_F32_e32 killed %40, implicit $exec
-    dead %43 = V_MUL_F32_e32 killed %42, undef %44, implicit $exec
+    %33 = nofpexcept V_MAD_F32 1, killed %53.sub0, 0, undef %34, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %35 = nofpexcept V_MAC_F32_e32 killed %33, undef %36, undef %35, implicit $mode, implicit $exec
+    %38 = nofpexcept V_MAX_F32_e32 0, killed %35, implicit $mode, implicit $exec
+    %39 = nofpexcept V_LOG_F32_e32 killed %38, implicit $mode, implicit $exec
+    %40 = nofpexcept V_MUL_F32_e32 killed %39, undef %41, implicit $mode, implicit $exec
+    %42 = nofpexcept V_EXP_F32_e32 killed %40, implicit $mode, implicit $exec
+    dead %43 = nofpexcept V_MUL_F32_e32 killed %42, undef %44, implicit $mode, implicit $exec
     %63 = COPY killed %51
 
   bb.31:

diff  --git a/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir b/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
index ad56ba08583e..6d1df163ec82 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
@@ -113,10 +113,10 @@ body:             |
 
   bb.1:
     %30 = V_MOV_B32_e32 1036831949, implicit $exec
-    %31 = V_ADD_F32_e32 %30, %1.sub3, implicit $exec
-    %33 = V_ADD_F32_e32 %30, %1.sub2, implicit $exec
-    %35 = V_ADD_F32_e32 %30, %1.sub1, implicit $exec
-    %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit $exec
+    %31 = nofpexcept V_ADD_F32_e32 %30, %1.sub3, implicit $mode, implicit $exec
+    %33 = nofpexcept V_ADD_F32_e32 %30, %1.sub2, implicit $mode, implicit $exec
+    %35 = nofpexcept V_ADD_F32_e32 %30, %1.sub1, implicit $mode, implicit $exec
+    %37 = nofpexcept V_ADD_F32_e32 killed %30, killed %1.sub0, implicit $mode, implicit $exec
     undef %56.sub0 = COPY killed %37
     %56.sub1 = COPY killed %35
     %56.sub2 = COPY killed %33
@@ -141,10 +141,10 @@ body:             |
     %7 = COPY killed %61
     %6 = COPY killed %60
     %8 = S_ADD_I32 killed %6, 1, implicit-def dead $scc
-    %44 = V_ADD_F32_e32 %43, %7.sub3, implicit $exec
-    %46 = V_ADD_F32_e32 %43, %7.sub2, implicit $exec
-    %48 = V_ADD_F32_e32 %43, %7.sub1, implicit $exec
-    %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit $exec
+    %44 = nofpexcept V_ADD_F32_e32 %43, %7.sub3, implicit $mode, implicit $exec
+    %46 = nofpexcept V_ADD_F32_e32 %43, %7.sub2, implicit $mode, implicit $exec
+    %48 = nofpexcept V_ADD_F32_e32 %43, %7.sub1, implicit $mode, implicit $exec
+    %50 = nofpexcept V_ADD_F32_e32 %43, killed %7.sub0, implicit $mode, implicit $exec
     undef %57.sub0 = COPY killed %50
     %57.sub1 = COPY killed %48
     %57.sub2 = COPY %46

diff  --git a/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir b/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
index 96bc78cbbd54..5664c7005b5d 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
@@ -23,9 +23,9 @@ body: |
     %6 : vreg_64 = COPY killed %4
 
   bb.2:
-    %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit $exec
+    %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit $mode, implicit $exec
 
   bb.3:
-    %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit $exec
+    %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/regcoalescing-remove-partial-redundancy-assert.mir b/llvm/test/CodeGen/AMDGPU/regcoalescing-remove-partial-redundancy-assert.mir
index 9693f61a45ff..7f45c4058221 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoalescing-remove-partial-redundancy-assert.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoalescing-remove-partial-redundancy-assert.mir
@@ -11,68 +11,68 @@ tracksRegLiveness: true
 body:             |
   bb.0:
     successors: %bb.1, %bb.2
-  
-    %21:vgpr_32 = V_TRUNC_F32_e32 undef %22:vgpr_32, implicit $exec
-    %23:vgpr_32 = V_CVT_U32_F32_e32 killed %21, implicit $exec
+
+    %21:vgpr_32 = nofpexcept V_TRUNC_F32_e32 undef %22:vgpr_32, implicit $mode, implicit $exec
+    %23:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 killed %21, implicit $mode, implicit $exec
     %108:vgpr_32 = V_LSHRREV_B32_e32 4, killed %23, implicit $exec
     undef %109.sub1:vreg_128 = COPY %108
     %28:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %29:sgpr_128, 3044, 0, 0 :: (dereferenceable invariant load 4)
     S_CMP_EQ_U32 killed %28, 0, implicit-def $scc
     S_CBRANCH_SCC0 %bb.2, implicit killed $scc
-  
+
   bb.1:
     %138:vreg_128 = COPY killed %109
     S_BRANCH %bb.9
-  
+
   bb.2:
     successors: %bb.3, %bb.4
-  
+
     S_CBRANCH_SCC0 %bb.4, implicit undef $scc
-  
+
   bb.3:
     %136:vreg_128 = COPY killed %109
     S_BRANCH %bb.5
-  
+
   bb.4:
     %136:vreg_128 = COPY killed %109
-  
+
   bb.5:
     successors: %bb.6, %bb.8
-  
+
     %110:vreg_128 = COPY killed %136
     dead %32:sreg_32_xm0 = S_MOV_B32 0
     %111:vreg_128 = COPY %110
     %111.sub3:vreg_128 = COPY undef %32
     S_CBRANCH_SCC1 %bb.8, implicit undef $scc
     S_BRANCH %bb.6
-  
+
   bb.6:
     %36:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %37:sgpr_128, 2708, 0, 0 :: (dereferenceable invariant load 4)
-    %39:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, killed %110.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 0, 0, 0, 0, implicit $exec
-    %40:vgpr_32 = V_MAD_F32 0, %111.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 0, 0, 0, 0, implicit $exec
-    %41:vgpr_32 = V_MUL_F32_e64 0, 0, 0, killed %40, 1, 0, implicit $exec
-    %43:vgpr_32 = V_MUL_F32_e32 0, %39, implicit $exec
+    %39:vgpr_32 = nnan arcp contract reassoc nofpexcept V_MAD_F32 0, killed %110.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %40:vgpr_32 = nofpexcept V_MAD_F32 0, %111.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %41:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 0, 0, killed %40, 1, 0, implicit $mode, implicit $exec
+    %43:vgpr_32 = nofpexcept V_MUL_F32_e32 0, %39, implicit $mode, implicit $exec
     %44:vgpr_32 = COPY killed %43
-    %44:vgpr_32 = V_MAC_F32_e32 0, killed %41, %44, implicit $exec
+    %44:vgpr_32 = nofpexcept V_MAC_F32_e32 0, killed %41, %44, implicit $mode, implicit $exec
     %47:vgpr_32 = V_MOV_B32_e32 2143289344, implicit $exec
     %46:vgpr_32 = COPY killed %47
-    %46:vgpr_32 = V_MAC_F32_e32 0, killed %39, %46, implicit $exec
+    %46:vgpr_32 = nofpexcept V_MAC_F32_e32 0, killed %39, %46, implicit $mode, implicit $exec
     undef %115.sub0:vreg_128 = COPY %46
     %115.sub1:vreg_128 = COPY killed %46
     %115.sub2:vreg_128 = COPY killed %44
     %50:sreg_64_xexec = V_CMP_NE_U32_e64 0, killed %36, implicit $exec
     dead %118:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %137:vreg_128 = IMPLICIT_DEF
-  
+
   bb.7:
     successors: %bb.7, %bb.8
-  
+
     %119:vreg_128 = COPY killed %137
     %121:vreg_128 = COPY killed %119
     %121.sub3:vreg_128 = COPY undef %32
-    %56:vgpr_32 = V_ADD_F32_e32 %115.sub2, %121.sub2, implicit $exec
-    %59:vgpr_32 = V_ADD_F32_e32 %115.sub1, %121.sub1, implicit $exec
-    %62:vgpr_32 = V_ADD_F32_e32 %115.sub0, killed %121.sub0, implicit $exec
+    %56:vgpr_32 = nofpexcept V_ADD_F32_e32 %115.sub2, %121.sub2, implicit $mode, implicit $exec
+    %59:vgpr_32 = nofpexcept V_ADD_F32_e32 %115.sub1, %121.sub1, implicit $mode, implicit $exec
+    %62:vgpr_32 = nofpexcept V_ADD_F32_e32 %115.sub0, killed %121.sub0, implicit $mode, implicit $exec
     undef %117.sub0:vreg_128 = COPY killed %62
     %117.sub1:vreg_128 = COPY killed %59
     %117.sub2:vreg_128 = COPY killed %56
@@ -81,118 +81,118 @@ body:             |
     %137:vreg_128 = COPY killed %117
     S_CBRANCH_VCCNZ %bb.7, implicit killed $vcc
     S_BRANCH %bb.8
-  
+
   bb.8:
     dead %66:sreg_32_xm0_xexec = S_BUFFER_LOAD_DWORD_IMM undef %67:sgpr_128, 2704, 0, 0 :: (dereferenceable invariant load 4)
     %138:vreg_128 = COPY killed %111
-  
+
   bb.9:
     %113:vreg_128 = COPY killed %138
     S_CBRANCH_SCC1 %bb.18, implicit undef $scc
     S_BRANCH %bb.10
-  
+
   bb.10:
     S_CBRANCH_SCC1 %bb.12, implicit undef $scc
     S_BRANCH %bb.11
-  
+
   bb.11:
-  
+
   bb.12:
     successors: %bb.13, %bb.18
-  
+
     S_CBRANCH_SCC1 %bb.18, implicit undef $scc
     S_BRANCH %bb.13
-  
+
   bb.13:
     successors: %bb.14, %bb.17
-  
+
     S_CBRANCH_SCC1 %bb.17, implicit undef $scc
     S_BRANCH %bb.14
-  
+
   bb.14:
     S_CBRANCH_SCC1 %bb.16, implicit undef $scc
     S_BRANCH %bb.15
-  
+
   bb.15:
-  
+
   bb.16:
-  
+
   bb.17:
-  
+
   bb.18:
     S_CBRANCH_SCC1 %bb.26, implicit undef $scc
     S_BRANCH %bb.19
-  
+
   bb.19:
     S_CBRANCH_SCC1 %bb.26, implicit undef $scc
     S_BRANCH %bb.20
-  
+
   bb.20:
     S_CBRANCH_SCC1 %bb.25, implicit undef $scc
     S_BRANCH %bb.21
-  
+
   bb.21:
     successors: %bb.22, %bb.24
-  
+
     S_CBRANCH_SCC1 %bb.24, implicit undef $scc
     S_BRANCH %bb.22
-  
+
   bb.22:
     successors: %bb.23, %bb.24
-  
+
     S_CBRANCH_SCC1 %bb.24, implicit undef $scc
     S_BRANCH %bb.23
-  
+
   bb.23:
-  
+
   bb.24:
-  
+
   bb.25:
-  
+
   bb.26:
     S_CBRANCH_SCC1 %bb.33, implicit undef $scc
     S_BRANCH %bb.27
-  
+
   bb.27:
     S_CBRANCH_SCC1 %bb.33, implicit undef $scc
     S_BRANCH %bb.28
-  
+
   bb.28:
     dead %77:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    %78:vgpr_32 = nnan arcp contract reassoc V_MAD_F32 0, killed %113.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 0, 1065353216, 0, 0, implicit $exec
+    %78:vgpr_32 = nnan arcp contract reassoc nofpexcept V_MAD_F32 0, killed %113.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 0, 1065353216, 0, 0, implicit $mode, implicit $exec
     dead %80:sreg_32_xm0 = S_MOV_B32 0
-    dead %82:vgpr_32 = V_MUL_F32_e32 killed %78, %78, implicit $exec
+    dead %82:vgpr_32 = nofpexcept V_MUL_F32_e32 killed %78, %78, implicit $mode, implicit $exec
     dead %126:vgpr_32 = V_MOV_B32_e32 2143289344, implicit $exec
     dead %125:vreg_128 = IMPLICIT_DEF
     dead %91:sreg_32_xm0 = S_MOV_B32 2143289344
     %96:sreg_64 = S_AND_B64 $exec, 0, implicit-def dead $scc
     %139:vreg_128 = IMPLICIT_DEF
-  
+
   bb.29:
     successors: %bb.30, %bb.31
-  
+
     dead %127:vreg_128 = COPY killed %139
     S_CBRANCH_SCC0 %bb.31, implicit undef $scc
-  
+
   bb.30:
     S_BRANCH %bb.32
-  
+
   bb.31:
     successors: %bb.32, %bb.34
-  
+
     $vcc = COPY %96
     S_CBRANCH_VCCNZ %bb.34, implicit killed $vcc
     S_BRANCH %bb.32
-  
+
   bb.32:
     dead %130:vreg_128 = IMPLICIT_DEF
     dead %128:vreg_128 = COPY undef %130
     %139:vreg_128 = IMPLICIT_DEF
     S_BRANCH %bb.29
-  
+
   bb.33:
     S_ENDPGM 0
-  
+
   bb.34:
     S_ENDPGM 0
 

diff  --git a/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir b/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
index d7892a0c9759..d03f60cc6835 100644
--- a/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
+++ b/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
@@ -2,7 +2,7 @@
 ---
 
 # GCN-LABEL: name: mac_invalid_operands
-# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit $exec
+# GCN: undef %18.sub0:vreg_128 = nofpexcept V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit $mode, implicit $exec
 
 name:            mac_invalid_operands
 alignment:       1
@@ -38,14 +38,14 @@ body:             |
   bb.0:
     successors: %bb.2, %bb.1
 
-    %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit $exec
+    %7 = nofpexcept V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit $mode, implicit $exec
     $vcc = COPY killed %7
     S_CBRANCH_VCCZ %bb.2, implicit killed $vcc
 
   bb.1:
     successors: %bb.3
 
-    %4 = V_ADD_F32_e32 undef %6, undef %5, implicit $exec
+    %4 = nofpexcept V_ADD_F32_e32 undef %6, undef %5, implicit $mode, implicit $exec
     undef %12.sub0 = COPY killed %4
     %17 = COPY killed %12
     S_BRANCH %bb.3
@@ -53,7 +53,7 @@ body:             |
   bb.2:
     successors: %bb.3
 
-    %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit $exec
+    %8 = nofpexcept V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit $mode, implicit $exec
     undef %13.sub0 = COPY %8
     %13.sub1 = COPY %8
     %13.sub2 = COPY killed %8
@@ -77,13 +77,13 @@ body:             |
 
 # GCN-LABEL: name: vreg_does_not_dominate
 
-# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit $exec
+# GCN: undef %8.sub1:vreg_128 = nofpexcept V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit $mode, implicit $exec
 # GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
 # GCN: undef %9.sub2:vreg_128 = COPY %7.sub0
 
-# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit $exec
-# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
-# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit $exec
+# GCN: undef %6.sub3:vreg_128 = nofpexcept V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit $mode, implicit $exec
+# GCN: undef %7.sub0:vreg_128 = nofpexcept V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+# GCN: %8.sub1:vreg_128 = nofpexcept V_ADD_F32_e32 %8.sub1, %8.sub1, implicit $mode, implicit $exec
 
 # GCN: BUFFER_STORE_DWORD_OFFEN %6.sub3, %0,
 # GCN: BUFFER_STORE_DWORD_OFFEN %9.sub2, %0,
@@ -117,7 +117,7 @@ body:             |
 
     %5 = COPY $sgpr30_sgpr31
     %0 = COPY $vgpr0
-    undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit $exec
+    undef %6.sub1 = nofpexcept V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit $mode, implicit $exec
     %6.sub0 = V_MOV_B32_e32 0, implicit $exec
     %6.sub2 = COPY %6.sub0
     S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
@@ -126,9 +126,9 @@ body:             |
   bb.1:
     successors: %bb.2
 
-    %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit $exec
-    %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
-    %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit $exec
+    %6.sub3 = nofpexcept V_ADD_F32_e32 undef %3, undef %3, implicit $mode, implicit $exec
+    %6.sub0 = nofpexcept V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %6.sub1 = nofpexcept V_ADD_F32_e32 %6.sub1, %6.sub1, implicit $mode, implicit $exec
     %6.sub2 = COPY %6.sub0
 
   bb.2:
@@ -143,7 +143,7 @@ body:             |
 
 # GCN-LABEL: name: inf_loop_tied_operand
 # GCN: bb.0:
-# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit $exec
+# GCN-NEXT: undef %2.sub0:vreg_128 = nofpexcept V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit $mode, implicit $exec
 # GCN-NEXT: dead undef %3.sub1:vreg_128 = COPY %2.sub0
 
 name:            inf_loop_tied_operand
@@ -154,7 +154,7 @@ registers:
   - { id: 2, class: vreg_128, preferred-register: '' }
 body:             |
   bb.0:
-    %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit $exec
+    %1 = nofpexcept V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit $mode, implicit $exec
     undef %2.sub0 = COPY %1
     %2.sub1 = COPY %1
 

diff  --git a/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir b/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir
index 88cb57ca0cdc..fd435d4adbe6 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-assert-onlydbg-value-empty-region.mir
@@ -33,13 +33,13 @@ body:             |
   ; CHECK:   [[DEF4:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[DEF5:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   [[COPY1:%[0-9]+]]:vreg_64 = COPY [[GLOBAL_LOAD_DWORDX2_]]
-  ; CHECK:   undef %6.sub0:vreg_64 = V_ADD_F32_e32 [[DEF]].sub0, [[COPY1]].sub0, implicit $exec
-  ; CHECK:   dead undef %6.sub1:vreg_64 = V_ADD_F32_e32 [[DEF]].sub1, [[COPY1]].sub0, implicit $exec
+  ; CHECK:   undef %6.sub0:vreg_64 = V_ADD_F32_e32 [[DEF]].sub0, [[COPY1]].sub0, implicit $mode, implicit $exec
+  ; CHECK:   dead undef %6.sub1:vreg_64 = V_ADD_F32_e32 [[DEF]].sub1, [[COPY1]].sub0, implicit $mode, implicit $exec
   ; CHECK:   [[DEF6:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
   ; CHECK:   [[GLOBAL_LOAD_DWORD1:%[0-9]+]]:vgpr_32 = GLOBAL_LOAD_DWORD [[COPY1]], 0, 0, 0, 0, implicit $exec
-  ; CHECK:   undef %19.sub0:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD1]], [[GLOBAL_LOAD_DWORDX2_]].sub0, implicit $exec
+  ; CHECK:   undef %19.sub0:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD1]], [[GLOBAL_LOAD_DWORDX2_]].sub0, implicit $mode, implicit $exec
   ; CHECK:   [[DEF7:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
-  ; CHECK:   %19.sub1:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD]], [[GLOBAL_LOAD_DWORD]], implicit $exec
+  ; CHECK:   %19.sub1:vreg_64 = V_ADD_F32_e32 [[GLOBAL_LOAD_DWORD]], [[GLOBAL_LOAD_DWORD]], implicit $mode, implicit $exec
   ; CHECK:   [[DEF8:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
   ; CHECK:   %4.sub1:vreg_64 = V_ADD_U32_e32 [[COPY]], [[COPY]], implicit $exec
   ; CHECK:   GLOBAL_STORE_DWORDX2 %19, %4, 32, 0, 0, 0, implicit $exec
@@ -55,11 +55,11 @@ body:             |
   ; CHECK:   GLOBAL_STORE_DWORD [[DEF7]], [[V_MOV_B32_e32_1]], 0, 0, 0, 0, implicit $exec
   ; CHECK: bb.1:
   ; CHECK:   successors: %bb.2(0x80000000)
-  ; CHECK:   S_SETREG_IMM32_B32 0, 1
+  ; CHECK:   S_SETREG_IMM32_B32 0, 1, implicit-def $mode, implicit $mode
   ; CHECK:   DBG_VALUE
   ; CHECK:   DBG_VALUE
   ; CHECK:   DBG_VALUE
-  ; CHECK:   S_SETREG_IMM32_B32 0, 1
+  ; CHECK:   S_SETREG_IMM32_B32 0, 1, implicit-def $mode, implicit $mode
   ; CHECK: bb.2:
   ; CHECK:   S_NOP 0, implicit [[COPY]]
   ; CHECK:   S_NOP 0, implicit [[DEF8]]
@@ -74,8 +74,8 @@ body:             |
     undef %4.sub1:vreg_64 = V_ADD_U32_e32 %0, %0, implicit $exec
     %4.sub0:vreg_64 = V_MOV_B32_e32 111, implicit $exec
     %5:vreg_64 = COPY %2
-    undef %6.sub0:vreg_64 = V_ADD_F32_e32 %1.sub0, %5.sub0, implicit $exec
-    %6.sub1:vreg_64 = V_ADD_F32_e32 %1.sub1, %5.sub0, implicit $exec
+    undef %6.sub0:vreg_64 = V_ADD_F32_e32 %1.sub0, %5.sub0, implicit $mode, implicit $exec
+    %6.sub1:vreg_64 = V_ADD_F32_e32 %1.sub1, %5.sub0, implicit $mode, implicit $exec
     %7:vgpr_32 = GLOBAL_LOAD_DWORD %5, 0, 0, 0, 0, implicit $exec
     %8:vreg_64 = IMPLICIT_DEF
     %9:vreg_64 = IMPLICIT_DEF
@@ -88,8 +88,8 @@ body:             |
     %16:vgpr_32 = IMPLICIT_DEF
     %17:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %18:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    undef %19.sub0:vreg_64 = V_ADD_F32_e32 %7, %2.sub0, implicit $exec
-    %19.sub1:vreg_64 = V_ADD_F32_e32 %3, %3, implicit $exec
+    undef %19.sub0:vreg_64 = V_ADD_F32_e32 %7, %2.sub0, implicit $mode, implicit $exec
+    %19.sub1:vreg_64 = V_ADD_F32_e32 %3, %3, implicit $mode, implicit $exec
     GLOBAL_STORE_DWORDX2 %19, %4, 32, 0, 0, 0, implicit $exec
     %11.sub0:vreg_64 = GLOBAL_LOAD_DWORD %9, 0, 0, 0, 0, implicit $exec
     %8.sub0:vreg_64 = GLOBAL_LOAD_DWORD %10, 0, 0, 0, 0, implicit $exec
@@ -101,11 +101,11 @@ body:             |
     GLOBAL_STORE_DWORD %15, %18, 0, 0, 0, 0, implicit $exec
 
   bb.1:
-    S_SETREG_IMM32_B32 0, 1
+    S_SETREG_IMM32_B32 0, 1, implicit-def $mode, implicit $mode
     DBG_VALUE
     DBG_VALUE
     DBG_VALUE
-    S_SETREG_IMM32_B32 0, 1
+    S_SETREG_IMM32_B32 0, 1, implicit-def $mode, implicit $mode
 
   bb.2:
     S_NOP 0, implicit %0

diff  --git a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
index 9d3144196eb1..aac40b73a41e 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
@@ -275,7 +275,7 @@ body:             |
     %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, 0, implicit $exec
     %77:vgpr_32 = IMPLICIT_DEF
     %78:vgpr_32 = IMPLICIT_DEF
-    %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit $exec
+    %79:vgpr_32 = nofpexcept V_MUL_F32_e32 0, %77, implicit $mode, implicit $exec
     %80:vgpr_32 = IMPLICIT_DEF
     %81:vgpr_32 = IMPLICIT_DEF
     %84:vgpr_32 = IMPLICIT_DEF
@@ -288,9 +288,9 @@ body:             |
     %87:vgpr_32 = IMPLICIT_DEF
     %88:vgpr_32 = IMPLICIT_DEF
     %90:vgpr_32 = IMPLICIT_DEF
-    %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit $exec
-    %95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit $exec
-    %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit $exec
+    %91:vgpr_32, dead %92:sreg_64 = nofpexcept V_DIV_SCALE_F32 %90, %90, 1065353216, implicit $mode, implicit $exec
+    %95:vgpr_32 = nofpexcept V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit $mode, implicit $exec
+    %96:vgpr_32, %97:sreg_64 = nofpexcept V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit $mode, implicit $exec
     %98:vgpr_32 = IMPLICIT_DEF
     %99:vgpr_32 = IMPLICIT_DEF
     %100:vgpr_32 = IMPLICIT_DEF
@@ -299,18 +299,18 @@ body:             |
     %103:vgpr_32 = IMPLICIT_DEF
     %104:vgpr_32 = IMPLICIT_DEF
     %105:vgpr_32 = IMPLICIT_DEF
-    %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit $exec
-    %108:vgpr_32 = V_RCP_F32_e32 0, implicit $exec
+    %106:vgpr_32, dead %107:sreg_64 = nofpexcept V_DIV_SCALE_F32 %90, %90, %105, implicit $mode, implicit $exec
+    %108:vgpr_32 = nofpexcept V_RCP_F32_e32 0, implicit $mode, implicit $exec
     %109:vgpr_32 = IMPLICIT_DEF
-    %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec
-    %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit $exec
-    %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit $exec
+    %110:vgpr_32 = nofpexcept V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %111:vgpr_32, %112:sreg_64 = nofpexcept V_DIV_SCALE_F32 0, 0, 0, implicit $mode, implicit $exec
+    %113:vgpr_32 = nofpexcept V_MUL_F32_e32 0, %110, implicit $mode, implicit $exec
     %114:vgpr_32 = IMPLICIT_DEF
     %115:vgpr_32 = IMPLICIT_DEF
     %116:vgpr_32 = IMPLICIT_DEF
     $vcc = IMPLICIT_DEF
-    %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed $vcc, implicit $exec
-    %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit $exec
+    %117:vgpr_32 = nofpexcept V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed $vcc, implicit $mode, implicit $exec
+    %118:vgpr_32 = nofpexcept V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit $mode, implicit $exec
     %119:vgpr_32 = IMPLICIT_DEF
     %120:vgpr_32 = IMPLICIT_DEF
     %121:vgpr_32 = IMPLICIT_DEF

diff  --git a/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir b/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
index b4ac7cf8732c..192bce362c4f 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
@@ -50,12 +50,12 @@ body:             |
 # GCN-LABEL: {{^}}name: trunc_shr_f32
 
 # CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit $mode, implicit $exec, implicit-def $vcc
 
 # VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit $mode, implicit $exec, implicit-def $vcc
 
-#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit $mode, implicit $exec
 
 ---
 name:            trunc_shr_f32
@@ -82,7 +82,7 @@ body:             |
     %0 = COPY $vgpr0_vgpr1
     %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
-    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def $vcc, implicit $exec
+    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit $mode, implicit $exec, implicit-def $vcc
     FLAT_STORE_DWORD %0, %11, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
     $sgpr30_sgpr31 = COPY %2
     S_SETPC_B64_return $sgpr30_sgpr31

diff  --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
index bd518924bb58..688e039b1664 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr-gfx10.mir
@@ -3,21 +3,21 @@
 # GCN-LABEL: {{^}}name: vop1_instructions
 
 # GFX1010: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
 
 # GFX1010: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
 
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $mode, implicit $exec
 
 ---
 name:            vop1_instructions
@@ -88,43 +88,43 @@ body:             |
     %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
     %11 = V_MOV_B32_e32 %10, implicit $exec
     %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
-    %14 = V_FRACT_F32_e32 123, implicit $exec
+    %14 = V_FRACT_F32_e32 123, implicit $mode, implicit $exec
     %15 = V_LSHLREV_B32_e64 16, %14, implicit $exec
     %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
-    %17 = V_SIN_F32_e32 %16, implicit $exec
+    %17 = V_SIN_F32_e32 %16, implicit $mode, implicit $exec
     %18 = V_LSHLREV_B32_e64 16, %17, implicit $exec
     %19 = V_LSHRREV_B32_e64 16, %18, implicit $exec
-    %20 = V_CVT_U32_F32_e32 %19, implicit $exec
+    %20 = V_CVT_U32_F32_e32 %19, implicit $mode, implicit $exec
     %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
-    %23 = V_CVT_F32_I32_e32 123, implicit $exec
+    %23 = V_CVT_F32_I32_e32 123, implicit $mode, implicit $exec
     %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
 
     %25 = V_LSHRREV_B32_e64 16, %3, implicit $exec
     %26 = V_MOV_B32_e64 %25, implicit $exec
     %26 = V_LSHLREV_B32_e64 16, %26, implicit $exec
-    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $exec
+    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $mode, implicit $exec
     %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
     %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
-    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $exec
+    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $mode, implicit $exec
     %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
     %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
-    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $exec
+    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $mode, implicit $exec
     %34 = V_LSHLREV_B32_e64 16, %33, implicit $exec
-    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $exec
+    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $mode, implicit $exec
     %36 = V_LSHLREV_B32_e64 16, %35, implicit $exec
 
 
     %37 = V_LSHRREV_B32_e64 16, %36, implicit $exec
-    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $exec
+    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $mode, implicit $exec
     %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
     %40 = V_LSHRREV_B32_e64 16, %39, implicit $exec
-    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $exec
+    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $mode, implicit $exec
     %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
     %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
-    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $exec
+    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $mode, implicit $exec
     %45 = V_LSHLREV_B32_e64 16, %44, implicit $exec
     %46 = V_LSHRREV_B32_e64 16, %45, implicit $exec
-    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $exec
+    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $mode, implicit $exec
     %48 = V_LSHLREV_B32_e64 16, %47, implicit $exec
 
 
@@ -139,21 +139,21 @@ body:             |
 # GCN-LABEL: {{^}}name: vop2_instructions
 
 # GFX1010: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
 
 # GFX1010: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $mode, implicit $exec
 
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $exec
-# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $mode, implicit $exec
+# GFX1010: %{{[0-9]+}}:vgpr_32 = V_FMAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $mode, implicit $exec
 
 name:            vop2_instructions
 tracksRegLiveness: true
@@ -237,18 +237,18 @@ body:             |
     %13 = V_LSHLREV_B32_e64 16, %12, implicit $exec
     %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
     %15 = V_BFE_U32 %13, 8, 8, implicit $exec
-    %16 = V_ADD_F32_e32 %14, %15, implicit $exec
+    %16 = V_ADD_F32_e32 %14, %15, implicit $mode, implicit $exec
     %17 = V_LSHLREV_B32_e64 16, %16, implicit $exec
     %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
     %19 = V_BFE_U32 %17, 8, 8, implicit $exec
-    %20 = V_SUB_F16_e32 %18, %19, implicit $exec
+    %20 = V_SUB_F16_e32 %18, %19, implicit $mode, implicit $exec
     %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
     %22 = V_BFE_U32 %20, 8, 8, implicit $exec
-    %23 = V_FMAC_F32_e32 %21, %22, %22, implicit $exec
+    %23 = V_FMAC_F32_e32 %21, %22, %22, implicit $mode, implicit $exec
     %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
     %25 = V_LSHRREV_B32_e64 16, %24, implicit $exec
     %26 = V_BFE_U32 %24, 8, 8, implicit $exec
-    %27 = V_FMAC_F16_e32 %25, %26, %26, implicit $exec
+    %27 = V_FMAC_F16_e32 %25, %26, %26, implicit $mode, implicit $exec
     %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
 
     %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
@@ -256,32 +256,32 @@ body:             |
     %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
     %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
     %33 = V_BFE_U32 %31, 8, 8, implicit $exec
-    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $exec
+    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $mode, implicit $exec
     %35 = V_LSHLREV_B32_e64 16, %34, implicit $exec
     %37 = V_BFE_U32 %35, 8, 8, implicit $exec
-    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $exec
+    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $mode, implicit $exec
     %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
     %40 = V_BFE_U32 %39, 8, 8, implicit $exec
-    %41 = V_FMAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $exec
+    %41 = V_FMAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $mode, implicit $exec
     %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
     %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
     %44 = V_BFE_U32 %42, 8, 8, implicit $exec
-    %45 = V_FMAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $exec
+    %45 = V_FMAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $mode, implicit $exec
     %46 = V_LSHLREV_B32_e64 16, %45, implicit $exec
 
     %47 = V_LSHRREV_B32_e64 16, %46, implicit $exec
     %48 = V_BFE_U32 %46, 8, 8, implicit $exec
-    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $exec
+    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $mode, implicit $exec
     %50 = V_LSHLREV_B32_e64 16, %49, implicit $exec
     %51 = V_BFE_U32 %50, 8, 8, implicit $exec
-    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $exec
+    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $mode, implicit $exec
     %53 = V_LSHLREV_B32_e64 16, %52, implicit $exec
     %54 = V_BFE_U32 %53, 8, 8, implicit $exec
-    %55 = V_FMAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $exec
+    %55 = V_FMAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $mode, implicit $exec
     %56 = V_LSHLREV_B32_e64 16, %55, implicit $exec
     %57 = V_LSHRREV_B32_e64 16, %56, implicit $exec
     %58 = V_BFE_U32 %56, 8, 8, implicit $exec
-    %59 = V_FMAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $exec
+    %59 = V_FMAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $mode, implicit $exec
     %60 = V_LSHLREV_B32_e64 16, %59, implicit $exec
 
     %100 = V_MOV_B32_e32 %60, implicit $exec

diff  --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
index 8ba20b4a66dd..fa55e1be8a3f 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -4,28 +4,28 @@
 # GFX89-LABEL: {{^}}name: vop1_instructions
 
 # GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
 
 
 # GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $mode, implicit $exec
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit $mode, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $mode, implicit $exec
 
 
 ---
@@ -97,43 +97,43 @@ body:             |
     %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
     %11 = V_MOV_B32_e32 %10, implicit $exec
     %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
-    %14 = V_FRACT_F32_e32 123, implicit $exec
+    %14 = V_FRACT_F32_e32 123, implicit $mode, implicit $exec
     %15 = V_LSHLREV_B32_e64 16, %14, implicit $exec
     %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
-    %17 = V_SIN_F32_e32 %16, implicit $exec
+    %17 = V_SIN_F32_e32 %16, implicit $mode, implicit $exec
     %18 = V_LSHLREV_B32_e64 16, %17, implicit $exec
     %19 = V_LSHRREV_B32_e64 16, %18, implicit $exec
-    %20 = V_CVT_U32_F32_e32 %19, implicit $exec
+    %20 = V_CVT_U32_F32_e32 %19, implicit $mode, implicit $exec
     %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
-    %23 = V_CVT_F32_I32_e32 123, implicit $exec
+    %23 = V_CVT_F32_I32_e32 123, implicit $mode, implicit $exec
     %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
 
     %25 = V_LSHRREV_B32_e64 16, %3, implicit $exec
     %26 = V_MOV_B32_e64 %25, implicit $exec
     %26 = V_LSHLREV_B32_e64 16, %26, implicit $exec
-    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $exec
+    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $mode, implicit $exec
     %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
     %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
-    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $exec
+    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $mode, implicit $exec
     %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
     %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
-    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $exec
+    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $mode, implicit $exec
     %34 = V_LSHLREV_B32_e64 16, %33, implicit $exec
-    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $exec
+    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $mode, implicit $exec
     %36 = V_LSHLREV_B32_e64 16, %35, implicit $exec
 
 
     %37 = V_LSHRREV_B32_e64 16, %36, implicit $exec
-    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $exec
+    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $mode, implicit $exec
     %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
     %40 = V_LSHRREV_B32_e64 16, %39, implicit $exec
-    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $exec
+    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $mode, implicit $exec
     %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
     %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
-    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $exec
+    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $mode, implicit $exec
     %45 = V_LSHLREV_B32_e64 16, %44, implicit $exec
     %46 = V_LSHRREV_B32_e64 16, %45, implicit $exec
-    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $exec
+    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $mode, implicit $exec
     %48 = V_LSHLREV_B32_e64 16, %47, implicit $exec
 
 
@@ -149,40 +149,40 @@ body:             |
 
 
 # VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
 
 # GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $mode, implicit $exec
 
 
 # VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
 
 # GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $mode, implicit $exec
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit $exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $mode, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $mode, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $mode, implicit $exec
 
 name:            vop2_instructions
 tracksRegLiveness: true
@@ -266,18 +266,18 @@ body:             |
     %13 = V_LSHLREV_B32_e64 16, %12, implicit $exec
     %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
     %15 = V_BFE_U32 %13, 8, 8, implicit $exec
-    %16 = V_ADD_F32_e32 %14, %15, implicit $exec
+    %16 = V_ADD_F32_e32 %14, %15, implicit $mode, implicit $exec
     %17 = V_LSHLREV_B32_e64 16, %16, implicit $exec
     %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
     %19 = V_BFE_U32 %17, 8, 8, implicit $exec
-    %20 = V_SUB_F16_e32 %18, %19, implicit $exec
+    %20 = V_SUB_F16_e32 %18, %19, implicit $mode, implicit $exec
     %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
     %22 = V_BFE_U32 %20, 8, 8, implicit $exec
-    %23 = V_MAC_F32_e32 %21, %22, %22, implicit $exec
+    %23 = V_MAC_F32_e32 %21, %22, %22, implicit $mode, implicit $exec
     %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
     %25 = V_LSHRREV_B32_e64 16, %24, implicit $exec
     %26 = V_BFE_U32 %24, 8, 8, implicit $exec
-    %27 = V_MAC_F16_e32 %25, %26, %26, implicit $exec
+    %27 = V_MAC_F16_e32 %25, %26, %26, implicit $mode, implicit $exec
     %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
 
     %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
@@ -285,32 +285,32 @@ body:             |
     %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
     %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
     %33 = V_BFE_U32 %31, 8, 8, implicit $exec
-    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $exec
+    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $mode, implicit $exec
     %35 = V_LSHLREV_B32_e64 16, %34, implicit $exec
     %37 = V_BFE_U32 %35, 8, 8, implicit $exec
-    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $exec
+    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $mode, implicit $exec
     %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
     %40 = V_BFE_U32 %39, 8, 8, implicit $exec
-    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $exec
+    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $mode, implicit $exec
     %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
     %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
     %44 = V_BFE_U32 %42, 8, 8, implicit $exec
-    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $exec
+    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $mode, implicit $exec
     %46 = V_LSHLREV_B32_e64 16, %45, implicit $exec
 
     %47 = V_LSHRREV_B32_e64 16, %46, implicit $exec
     %48 = V_BFE_U32 %46, 8, 8, implicit $exec
-    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $exec
+    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $mode, implicit $exec
     %50 = V_LSHLREV_B32_e64 16, %49, implicit $exec
     %51 = V_BFE_U32 %50, 8, 8, implicit $exec
-    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $exec
+    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $mode, implicit $exec
     %53 = V_LSHLREV_B32_e64 16, %52, implicit $exec
     %54 = V_BFE_U32 %53, 8, 8, implicit $exec
-    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $exec
+    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $mode, implicit $exec
     %56 = V_LSHLREV_B32_e64 16, %55, implicit $exec
     %57 = V_LSHRREV_B32_e64 16, %56, implicit $exec
     %58 = V_BFE_U32 %56, 8, 8, implicit $exec
-    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $exec
+    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $mode, implicit $exec
     %60 = V_LSHLREV_B32_e64 16, %59, implicit $exec
 
     %100 = V_MOV_B32_e32 %60, implicit $exec
@@ -325,40 +325,40 @@ body:             |
 # GCN-LABEL: {{^}}name: vopc_instructions
 
 # GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
-# GFX89: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
-# GFX89: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX89: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $mode, implicit $exec
+# GFX89: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
 # GFX89: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
 # GFX89: $vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
 
 
-# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $mode, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def $exec, implicit $mode, implicit $exec
 # VI: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def $vcc, implicit $exec
 # VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def $exec, implicit $exec
 
-# GFX9: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $mode, implicit $exec
 # GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
 # GFX9: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
 # GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
 # GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
 
 
-# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
-# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
-# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $mode, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $mode, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
 
-# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
-# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
-# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
-# GFX9: $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $mode, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $mode, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $mode, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def $exec, implicit $mode, implicit $exec
 
 
 name:            vopc_instructions
@@ -406,37 +406,37 @@ body:             |
     %6 = S_MOV_B32 65535
 
     %10 = V_AND_B32_e64 %5, %3, implicit $exec
-    V_CMP_EQ_F32_e32 123, killed %10, implicit-def $vcc, implicit $exec
+    V_CMP_EQ_F32_e32 123, killed %10, implicit-def $vcc, implicit $mode, implicit $exec
     %11 = V_AND_B32_e64 %5, %3, implicit $exec
-    V_CMPX_GT_F32_e32 123, killed %11, implicit-def $vcc, implicit-def $exec, implicit $exec
+    V_CMPX_GT_F32_e32 123, killed %11, implicit-def $vcc, implicit $mode, implicit-def $exec, implicit $exec
     %12 = V_AND_B32_e64 %5, %3, implicit $exec
     V_CMP_LT_I32_e32 123, killed %12, implicit-def $vcc, implicit $exec
     %13 = V_AND_B32_e64 %5, %3, implicit $exec
     V_CMPX_EQ_I32_e32 123, killed %13, implicit-def $vcc, implicit-def $exec, implicit $exec
 
     %14 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit $mode, implicit $exec
     %15 = V_AND_B32_e64 %5, %3, implicit $exec
-    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def $exec, implicit $exec
+    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def $exec, implicit $mode, implicit $exec
     %16 = V_AND_B32_e64 %5, %3, implicit $exec
     $vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit $exec
     %17 = V_AND_B32_e64 %5, %3, implicit $exec
     %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def $exec, implicit $exec
 
     %20 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit $mode, implicit $exec
     %21 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def $exec, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def $exec, implicit $mode, implicit $exec
     %23 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit $mode, implicit $exec
     %24 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def $exec, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def $exec, implicit $mode, implicit $exec
     %25 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def $exec, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def $exec, implicit $mode, implicit $exec
     %26 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def $exec, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def $exec, implicit $mode, implicit $exec
     %27 = V_AND_B32_e64 %5, %3, implicit $exec
-    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def $exec, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def $exec, implicit $mode, implicit $exec
 
 
     %100 = V_MOV_B32_e32 $vcc_lo, implicit $exec
@@ -447,7 +447,7 @@ body:             |
 ...
 
 # GCN-LABEL: name: preserve_flags
-# GCN: = nnan nofpexcept V_ADD_F32_sdwa 0, %4, 0, %4, 0, 0, 6, 0, 5, 1, implicit $exec
+# GCN: = nnan nofpexcept V_ADD_F32_sdwa 0, %4, 0, %4, 0, 0, 6, 0, 5, 1, implicit $mode, implicit $exec
 
 ---
 name: preserve_flags

diff  --git a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
index e91268364a65..33802ad21fdd 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
@@ -14,7 +14,7 @@
 ---
 name:            add_f16_u32_preserve
 tracksRegLiveness: true
-registers:       
+registers:
   - { id: 0, class: vreg_64 }
   - { id: 1, class: vreg_64 }
   - { id: 2, class: sreg_64 }
@@ -32,7 +32,7 @@ registers:
 body:             |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
-  
+
     %2 = COPY $sgpr30_sgpr31
     %1 = COPY $vgpr2_vgpr3
     %0 = COPY $vgpr0_vgpr1
@@ -44,9 +44,9 @@ body:             |
     %7 = V_BFE_U32 %3, 8, 8, implicit $exec
     %8 = V_LSHRREV_B32_e32 24, %4, implicit $exec
 
-    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit $exec
+    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit $mode, implicit $exec
     %10 = V_LSHLREV_B16_e64 8, %9, implicit $exec
-    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit $exec
+    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit $mode, implicit $exec
     %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
 
     %13 = V_OR_B32_e64 %10, %12, implicit $exec

diff  --git a/llvm/test/CodeGen/AMDGPU/shrink-instructions-flags.mir b/llvm/test/CodeGen/AMDGPU/shrink-instructions-flags.mir
index b8c36bc77148..6a4e942e07a9 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-instructions-flags.mir
+++ b/llvm/test/CodeGen/AMDGPU/shrink-instructions-flags.mir
@@ -14,7 +14,7 @@ body:             |
     ; CHECK: liveins: $vgpr0, $vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: %2:vgpr_32 = nnan nofpexcept V_ADD_F32_e32 [[COPY]], [[COPY1]], implicit $exec
+    ; CHECK: %2:vgpr_32 = nnan nofpexcept V_ADD_F32_e32 [[COPY]], [[COPY1]], implicit $mode, implicit $exec
     ; CHECK: S_NOP 0
     %0:vgpr_32 = COPY $vgpr0
     %1:vgpr_32 = COPY $vgpr0

diff  --git a/llvm/test/CodeGen/AMDGPU/smem-war-hazard.mir b/llvm/test/CodeGen/AMDGPU/smem-war-hazard.mir
index 531cca11e47b..a3747ac6ac42 100644
--- a/llvm/test/CodeGen/AMDGPU/smem-war-hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/smem-war-hazard.mir
@@ -10,7 +10,7 @@ body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -25,7 +25,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr4, $sgpr5, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     $sgpr3 = S_ADD_U32 $sgpr4, $sgpr5, implicit-def $scc
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -42,7 +42,7 @@ body: |
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT 0
     $sgpr3 = S_ADD_U32 $sgpr2, $sgpr4, implicit-def $scc
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -59,7 +59,7 @@ body: |
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT 0
     $sgpr3 = S_ADD_U32 $sgpr5, $sgpr4, implicit-def $scc
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -75,7 +75,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $sgpr6, $sgpr7, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     $sgpr5 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0, 0
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -90,7 +90,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT 0
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -106,7 +106,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT 3952
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -122,7 +122,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT 53007
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -137,7 +137,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT 49279
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -152,7 +152,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT_LGKMCNT $sgpr_null, 0
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -168,7 +168,7 @@ body: |
     liveins: $sgpr0, $sgpr1, $vgpr0, $vgpr1
     $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0, 0
     S_WAITCNT_LGKMCNT $sgpr_null, 1
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -187,7 +187,7 @@ body: |
 
   bb.1:
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0, $vgpr1
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -212,12 +212,12 @@ body: |
 
   bb.1:
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr4, $sgpr5, $vgpr0, $vgpr1
-    $sgpr4_sgpr5 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr4_sgpr5 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 
   bb.2:
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr4, $sgpr5, $vgpr0, $vgpr1
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -244,17 +244,17 @@ body: |
 
   bb.1:
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr4, $sgpr5, $vgpr0, $vgpr1
-    $sgpr4_sgpr5 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr4_sgpr5 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 
   bb.2:
     successors: %bb.3
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr4, $sgpr5, $vgpr0, $vgpr1
-    $sgpr4_sgpr5 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr4_sgpr5 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
 
   bb.3:
     liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr4, $sgpr5, $vgpr0, $vgpr1
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -268,7 +268,7 @@ body: |
   bb.0:
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0, $vgpr1
     successors: %bb.1
-    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_F32_e64 0, $vgpr0, 0, $vgpr1, 1, implicit $mode, implicit $exec
 
   bb.1:
     liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0, $vgpr1
@@ -286,7 +286,7 @@ body: |
   bb.0:
     liveins: $vcc, $vgpr0
     $sgpr0 = S_LOAD_DWORD_IMM $vcc, 0, 0, 0
-    V_CMP_EQ_F32_e32 $vgpr0, $vgpr0, implicit-def $vcc, implicit $exec
+    V_CMP_EQ_F32_e32 $vgpr0, $vgpr0, implicit-def $vcc, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 

diff  --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
index fd0debda403c..2be645954aed 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
@@ -7,10 +7,10 @@
 
 # CHECK-LABEL: name: expecting_non_empty_interval
 
-# CHECK: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit $exec
+# CHECK: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit $mode, implicit $exec
 # CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, implicit $exec :: (store 8 into %stack.0, align 4, addrspace 5)
 # CHECK-NEXT: undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec
-# CHECK-NEXT: dead %3:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit $exec
+# CHECK-NEXT: dead %3:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit $mode, implicit $exec
 
 # CHECK: S_NOP 0, implicit %6.sub1
 # CHECK-NEXT: %8:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr32, 0, implicit $exec :: (load 8 from %stack.0, align 4, addrspace 5)
@@ -26,9 +26,9 @@ body:             |
   bb.0:
     successors: %bb.1
 
-    undef %0.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %0.sub1, implicit $exec
+    undef %0.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %0.sub1, implicit $mode, implicit $exec
     undef %2.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec
-    dead %3:vgpr_32 = V_MUL_F32_e32 0, %2.sub1, implicit $exec
+    dead %3:vgpr_32 = V_MUL_F32_e32 0, %2.sub1, implicit $mode, implicit $exec
 
   bb.1:
     S_NOP 0, implicit %2.sub1

diff  --git a/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir b/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
index 1bac81699edd..0fa0ddab4e11 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
@@ -85,10 +85,10 @@ body: |
   bb.6:
     successors: %bb.8(0x40000000), %bb.11(0x40000000)
     %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    dead %6:vgpr_32 = V_MUL_F32_e32 0, undef %7:vgpr_32, implicit $exec
-    dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $exec
-    undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $exec
-    undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $exec
+    dead %6:vgpr_32 = V_MUL_F32_e32 0, undef %7:vgpr_32, implicit $mode, implicit $exec
+    dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $mode, implicit $exec
+    undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $mode, implicit $exec
+    undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $mode, implicit $exec
     undef %11.sub0:sgpr_256 = S_MOV_B32 0
     %11.sub1:sgpr_256 = COPY %11.sub0
     %11.sub2:sgpr_256 = COPY %11.sub0
@@ -161,31 +161,31 @@ body: |
   bb.13:
     successors: %bb.15(0x40000000), %bb.14(0x40000000)
 
-    %18:vgpr_32 = V_MAD_F32 0, %10.sub0, 0, target-flags(amdgpu-gotprel) 1073741824, 0, -1082130432, 0, 0, implicit $exec
-    %19:vgpr_32 = V_MAD_F32 0, %12.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $exec
+    %18:vgpr_32 = V_MAD_F32 0, %10.sub0, 0, target-flags(amdgpu-gotprel) 1073741824, 0, -1082130432, 0, 0, implicit $mode, implicit $exec
+    %19:vgpr_32 = V_MAD_F32 0, %12.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     %20:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %21:sgpr_128, 1040, 0, 0 :: (dereferenceable invariant load 16)
-    %22:vgpr_32 = V_ADD_F32_e32 0, %19, implicit $exec
-    %23:vgpr_32 = V_MAD_F32 0, %18, 0, 0, 0, 0, 0, 0, implicit $exec
+    %22:vgpr_32 = V_ADD_F32_e32 0, %19, implicit $mode, implicit $exec
+    %23:vgpr_32 = V_MAD_F32 0, %18, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     %24:vgpr_32 = COPY %20.sub3
-    %25:vgpr_32 = V_MUL_F32_e64 0, target-flags(amdgpu-gotprel32-lo) 0, 0, %20.sub1, 0, 0, implicit $exec
+    %25:vgpr_32 = V_MUL_F32_e64 0, target-flags(amdgpu-gotprel32-lo) 0, 0, %20.sub1, 0, 0, implicit $mode, implicit $exec
     %26:sgpr_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %27:sgpr_128, 1056, 0, 0 :: (dereferenceable invariant load 16)
-    %28:vgpr_32 = V_MAD_F32 0, %18, 0, %26.sub0, 0, 0, 0, 0, implicit $exec
-    %29:vgpr_32 = V_ADD_F32_e32 %28, %19, implicit $exec
-    %30:vgpr_32 = V_RCP_F32_e32 %29, implicit $exec
-    %25:vgpr_32 = V_MAC_F32_e32 0, %18, %25, implicit $exec
-    %31:vgpr_32 = V_MAD_F32 0, target-flags(amdgpu-gotprel) 0, 0, %12.sub0, 0, %24, 0, 0, implicit $exec
-    %32:vgpr_32 = V_ADD_F32_e32 %25, %31, implicit $exec
-    %33:vgpr_32 = V_MUL_F32_e32 %22, %30, implicit $exec
-    %34:vgpr_32 = V_MUL_F32_e32 %23, %30, implicit $exec
-    %35:vgpr_32 = V_MUL_F32_e32 %32, %30, implicit $exec
-    %36:vgpr_32 = V_MUL_F32_e32 0, %34, implicit $exec
-    %36:vgpr_32 = V_MAC_F32_e32 0, %33, %36, implicit $exec
-    %37:vgpr_32 = V_MAD_F32 0, %35, 0, 0, 0, 0, 0, 0, implicit $exec
+    %28:vgpr_32 = V_MAD_F32 0, %18, 0, %26.sub0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %29:vgpr_32 = V_ADD_F32_e32 %28, %19, implicit $mode, implicit $exec
+    %30:vgpr_32 = V_RCP_F32_e32 %29, implicit $mode, implicit $exec
+    %25:vgpr_32 = V_MAC_F32_e32 0, %18, %25, implicit $mode, implicit $exec
+    %31:vgpr_32 = V_MAD_F32 0, target-flags(amdgpu-gotprel) 0, 0, %12.sub0, 0, %24, 0, 0, implicit $mode, implicit $exec
+    %32:vgpr_32 = V_ADD_F32_e32 %25, %31, implicit $mode, implicit $exec
+    %33:vgpr_32 = V_MUL_F32_e32 %22, %30, implicit $mode, implicit $exec
+    %34:vgpr_32 = V_MUL_F32_e32 %23, %30, implicit $mode, implicit $exec
+    %35:vgpr_32 = V_MUL_F32_e32 %32, %30, implicit $mode, implicit $exec
+    %36:vgpr_32 = V_MUL_F32_e32 0, %34, implicit $mode, implicit $exec
+    %36:vgpr_32 = V_MAC_F32_e32 0, %33, %36, implicit $mode, implicit $exec
+    %37:vgpr_32 = V_MAD_F32 0, %35, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
     %38:sreg_64_xexec = V_CMP_NE_U32_e64 0, %5, implicit $exec
     %39:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %38, implicit $exec
     V_CMP_NE_U32_e32 1, %39, implicit-def $vcc, implicit $exec
     $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
-    %40:vgpr_32 = V_ADD_F32_e32 %36, %37, implicit $exec
+    %40:vgpr_32 = V_ADD_F32_e32 %36, %37, implicit $mode, implicit $exec
     S_CBRANCH_VCCZ %bb.15, implicit $vcc
 
   bb.14:
@@ -194,9 +194,9 @@ body: |
 
   bb.15:
     successors: %bb.16(0x40000000), %bb.18(0x40000000)
-    %41:vgpr_32 = V_MAD_F32 0, %40, 0, 0, 0, 0, 0, 0, implicit $exec
-    %42:sreg_64 = V_CMP_LE_F32_e64 0, 0, 0, %41, 0, implicit $exec
-    %43:sreg_64 = V_CMP_GE_F32_e64 0, 1065353216, 0, %41, 0, implicit $exec
+    %41:vgpr_32 = V_MAD_F32 0, %40, 0, 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %42:sreg_64 = V_CMP_LE_F32_e64 0, 0, 0, %41, 0, implicit $mode, implicit $exec
+    %43:sreg_64 = V_CMP_GE_F32_e64 0, 1065353216, 0, %41, 0, implicit $mode, implicit $exec
     %44:sreg_64 = S_AND_B64 %43, %43, implicit-def dead $scc
     %45:sreg_64 = S_AND_B64 %42, %42, implicit-def dead $scc
     %46:sreg_64 = S_AND_B64 %45, %44, implicit-def dead $scc
@@ -222,15 +222,15 @@ body: |
   bb.18:
     successors: %bb.20(0x40000000), %bb.19(0x40000000)
     $exec = S_OR_B64 $exec, %47, implicit-def $scc
-    %52:vgpr_32 = V_MAD_F32 0, %3.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 1, %3.sub0, 0, 0, implicit $exec
-    %53:vgpr_32 = V_MUL_F32_e32 -2147483648, %3.sub1, implicit $exec
-    %53:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel32-hi) 1065353216, %3.sub2, %53, implicit $exec
-    %54:vgpr_32 = V_MUL_F32_e32 %53, %53, implicit $exec
-    %54:vgpr_32 = V_MAC_F32_e32 %52, %52, %54, implicit $exec
-    %55:vgpr_32 = V_SQRT_F32_e32 %54, implicit $exec
+    %52:vgpr_32 = V_MAD_F32 0, %3.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 1, %3.sub0, 0, 0, implicit $mode, implicit $exec
+    %53:vgpr_32 = V_MUL_F32_e32 -2147483648, %3.sub1, implicit $mode, implicit $exec
+    %53:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel32-hi) 1065353216, %3.sub2, %53, implicit $mode, implicit $exec
+    %54:vgpr_32 = V_MUL_F32_e32 %53, %53, implicit $mode, implicit $exec
+    %54:vgpr_32 = V_MAC_F32_e32 %52, %52, %54, implicit $mode, implicit $exec
+    %55:vgpr_32 = V_SQRT_F32_e32 %54, implicit $mode, implicit $exec
     %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %56:vgpr_32 = V_MOV_B32_e32 981668463, implicit $exec
-    %57:sreg_64 = V_CMP_NGT_F32_e64 0, %55, 0, %56, 0, implicit $exec
+    %57:sreg_64 = V_CMP_NGT_F32_e64 0, %55, 0, %56, 0, implicit $mode, implicit $exec
     %58:sreg_64 = S_AND_B64 $exec, %57, implicit-def dead $scc
     $vcc = COPY %58
     S_CBRANCH_VCCZ %bb.20, implicit $vcc
@@ -255,8 +255,8 @@ body: |
 
   bb.23:
     successors: %bb.22(0x80000000)
-    undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $exec
-    %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $exec
+    undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $mode, implicit $exec
+    %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $mode, implicit $exec
     undef %61.sub0:sgpr_256 = S_MOV_B32 0
     %61.sub1:sgpr_256 = COPY %61.sub0
     %61.sub2:sgpr_256 = COPY %61.sub0
@@ -266,20 +266,20 @@ body: |
     %61.sub6:sgpr_256 = COPY %61.sub0
     %61.sub7:sgpr_256 = COPY %61.sub0
     %62:vgpr_32 = V_MOV_B32_e32 1033100696, implicit $exec
-    %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $exec
-    %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $exec
+    %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $mode, implicit $exec
+    %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $mode, implicit $exec
     %64:vgpr_32 = IMAGE_LOAD_V1_V2 %60, %61, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
-    %64:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel) 0, %51.sub0, %64, implicit $exec
-    %65:vgpr_32 = V_MUL_F32_e32 0, %64, implicit $exec
-    %66:vgpr_32 = V_MUL_F32_e32 0, %65, implicit $exec
-    %67:vgpr_32 = V_MAD_F32 0, %66, 0, %62, 0, 0, 0, 0, implicit $exec
-    %63:vgpr_32 = V_MAC_F32_e32 %15.sub2, %62, %63, implicit $exec
-    %4:vgpr_32 = V_ADD_F32_e32 %63, %67, implicit $exec
+    %64:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel) 0, %51.sub0, %64, implicit $mode, implicit $exec
+    %65:vgpr_32 = V_MUL_F32_e32 0, %64, implicit $mode, implicit $exec
+    %66:vgpr_32 = V_MUL_F32_e32 0, %65, implicit $mode, implicit $exec
+    %67:vgpr_32 = V_MAD_F32 0, %66, 0, %62, 0, 0, 0, 0, implicit $mode, implicit $exec
+    %63:vgpr_32 = V_MAC_F32_e32 %15.sub2, %62, %63, implicit $mode, implicit $exec
+    %4:vgpr_32 = V_ADD_F32_e32 %63, %67, implicit $mode, implicit $exec
     S_BRANCH %bb.22
 
   bb.24:
-    %68:vgpr_32 = V_MUL_F32_e32 0, %4, implicit $exec
-    %69:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, undef %70:vgpr_32, 0, %68, 0, 0, implicit $exec
+    %68:vgpr_32 = V_MUL_F32_e32 0, %4, implicit $mode, implicit $exec
+    %69:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, undef %70:vgpr_32, 0, %68, 0, 0, implicit $mode, implicit $exec
     EXP 0, undef %71:vgpr_32, %69, undef %72:vgpr_32, undef %73:vgpr_32, -1, -1, 15, implicit $exec
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/twoaddr-fma.mir b/llvm/test/CodeGen/AMDGPU/twoaddr-fma.mir
index b0aeb91787cd..343864c4cd67 100644
--- a/llvm/test/CodeGen/AMDGPU/twoaddr-fma.mir
+++ b/llvm/test/CodeGen/AMDGPU/twoaddr-fma.mir
@@ -1,7 +1,7 @@
 # RUN: llc -march=amdgcn -mcpu=gfx1010 %s -run-pass twoaddressinstruction -verify-machineinstrs -o - | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: test_fmamk_reg_imm_f32
-# GCN: V_FMAMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_FMAMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_fmamk_reg_imm_f32
 registers:
@@ -15,12 +15,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_FMAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $exec
+    %3 = V_FMAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmamk_imm_reg_f32
-# GCN: V_FMAMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_FMAMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_fmamk_imm_reg_f32
 registers:
@@ -34,12 +34,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_FMAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $exec
+    %3 = V_FMAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmaak_f32
-# GCN: V_FMAAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
+# GCN: V_FMAAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $mode, implicit $exec
 ---
 name:            test_fmaak_f32
 registers:
@@ -51,12 +51,12 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = V_MOV_B32_e32 1078523331, implicit $exec
-    %2 = V_FMAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
+    %2 = V_FMAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmamk_reg_imm_f16
-# GCN: V_FMAMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_FMAMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_fmamk_reg_imm_f16
 registers:
@@ -70,12 +70,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_FMAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $exec
+    %3 = V_FMAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmamk_imm_reg_f16
-# GCN: V_FMAMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_FMAMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_fmamk_imm_reg_f16
 registers:
@@ -89,12 +89,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_FMAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $exec
+    %3 = V_FMAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmaak_f16
-# GCN: V_FMAAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
+# GCN: V_FMAAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit $mode, implicit $exec
 ---
 name:            test_fmaak_f16
 registers:
@@ -106,11 +106,11 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = V_MOV_B32_e32 1078523331, implicit $exec
-    %2 = V_FMAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
+    %2 = V_FMAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit $mode, implicit $exec
 ...
 
 # GCN-LABEL: name: test_fmaak_sgpr_src0_f32
-# GCN: %2:vgpr_32 = V_FMAMK_F32 killed %0, 1078523331, %3:vgpr_32, implicit $exec
+# GCN: %2:vgpr_32 = V_FMAMK_F32 killed %0, 1078523331, %3:vgpr_32, implicit $mode, implicit $exec
 
 ---
 name:            test_fmaak_sgpr_src0_f32
@@ -124,12 +124,12 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = V_MOV_B32_e32 1078523331, implicit $exec
-    %2 = V_FMAC_F32_e32 killed %0, %1, %3, implicit $exec
+    %2 = V_FMAC_F32_e32 killed %0, %1, %3, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmaak_inlineimm_src0_f32
-# GCN: %1:vgpr_32 = V_FMAMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit $exec
+# GCN: %1:vgpr_32 = V_FMAMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit $mode, implicit $exec
 
 ---
 name:            test_fmaak_inlineimm_src0_f32
@@ -141,12 +141,12 @@ body:             |
   bb.0:
 
     %0 = V_MOV_B32_e32 1078523331, implicit $exec
-    %1 = V_FMAC_F32_e32 1073741824, %0, %2, implicit $exec
+    %1 = V_FMAC_F32_e32 1073741824, %0, %2, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmaak_otherimm_src0_f32
-# GCN: %1:vgpr_32 = V_FMAC_F32_e32 1120403456, %0, %1, implicit $exec
+# GCN: %1:vgpr_32 = V_FMAC_F32_e32 1120403456, %0, %1, implicit $mode, implicit $exec
 
 ---
 name:            test_fmaak_otherimm_src0_f32
@@ -158,12 +158,12 @@ body:             |
   bb.0:
 
     %0 = V_MOV_B32_e32 1078523331, implicit $exec
-    %1 = V_FMAC_F32_e32 1120403456, %0, %2, implicit $exec
+    %1 = V_FMAC_F32_e32 1120403456, %0, %2, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmaak_other_constantlike_src0_f32
-# GCN: %1:vgpr_32 = V_FMAC_F32_e32 %stack.0, %0, %1, implicit $exec
+# GCN: %1:vgpr_32 = V_FMAC_F32_e32 %stack.0, %0, %1, implicit $mode, implicit $exec
 ---
 name:            test_fmaak_other_constantlike_src0_f32
 registers:
@@ -178,12 +178,12 @@ body:             |
   bb.0:
 
     %0 = V_MOV_B32_e32 1078523331, implicit $exec
-    %1 = V_FMAC_F32_e32 %stack.0, %0, %2, implicit $exec
+    %1 = V_FMAC_F32_e32 %stack.0, %0, %2, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_fmaak_inline_literal_f16
-# GCN: %2:vgpr_32 = V_FMAAK_F16 16384, killed %0, 49664, implicit $exec
+# GCN: %2:vgpr_32 = V_FMAAK_F16 16384, killed %0, 49664, implicit $mode, implicit $exec
 
 ---
 name:            test_fmaak_inline_literal_f16
@@ -192,11 +192,11 @@ liveins:
 body:             |
   bb.0:
     liveins: $vgpr0
-  
+
     %3:vgpr_32 = COPY killed $vgpr0
 
     %26:vgpr_32 = V_MOV_B32_e32 49664, implicit $exec
-    %28:vgpr_32 = V_FMAC_F16_e32 16384, killed %3, %26, implicit $exec
+    %28:vgpr_32 = V_FMAC_F16_e32 16384, killed %3, %26, implicit $mode, implicit $exec
     S_ENDPGM 0
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir b/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
index e75194bf4b8f..75949e6a2476 100644
--- a/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
+++ b/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
@@ -1,7 +1,7 @@
 # RUN: llc -march=amdgcn -mcpu=gfx900 %s -run-pass twoaddressinstruction -verify-machineinstrs -o - | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: test_madmk_reg_imm_f32
-# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_madmk_reg_imm_f32
 registers:
@@ -15,12 +15,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $exec
+    %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madmk_imm_reg_f32
-# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_madmk_imm_reg_f32
 registers:
@@ -34,12 +34,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $exec
+    %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madak_f32
-# GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
+# GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $mode, implicit $exec
 ---
 name:            test_madak_f32
 registers:
@@ -51,12 +51,12 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = V_MOV_B32_e32 1078523331, implicit $exec
-    %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
+    %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madmk_reg_imm_f16
-# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_madmk_reg_imm_f16
 registers:
@@ -70,12 +70,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $exec
+    %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madmk_imm_reg_f16
-# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
+# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $mode, implicit $exec
 ---
 name:            test_madmk_imm_reg_f16
 registers:
@@ -89,12 +89,12 @@ body:             |
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
     %2 = V_MOV_B32_e32 1078523331, implicit $exec
-    %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $exec
+    %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madak_f16
-# GCN: V_MADAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
+# GCN: V_MADAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit $mode, implicit $exec
 ---
 name:            test_madak_f16
 registers:
@@ -106,14 +106,14 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = V_MOV_B32_e32 1078523331, implicit $exec
-    %2 = V_MAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
+    %2 = V_MAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit $mode, implicit $exec
 ...
 
 # Make sure constant bus restriction isn't violated if src0 is an SGPR.
 
 # GCN-LABEL: name: test_madak_sgpr_src0_f32
 # GCN: %1:vgpr_32 = V_MOV_B32_e32 1078523331, implicit $exec
-# GCN: %2:vgpr_32 = V_MAD_F32 0, killed %0, 0, %1, 0, %3:vgpr_32, 0, 0, implicit $exec
+# GCN: %2:vgpr_32 = V_MAD_F32 0, killed %0, 0, %1, 0, %3:vgpr_32, 0, 0, implicit $mode, implicit $exec
 
 ---
 name:            test_madak_sgpr_src0_f32
@@ -127,14 +127,14 @@ body:             |
 
     %0 = IMPLICIT_DEF
     %1 = V_MOV_B32_e32 1078523331, implicit $exec
-    %2 = V_MAC_F32_e32 killed %0, %1, %3, implicit $exec
+    %2 = V_MAC_F32_e32 killed %0, %1, %3, implicit $mode, implicit $exec
 
 ...
 
 # This can still fold if this is an inline immediate.
 
 # GCN-LABEL: name: test_madak_inlineimm_src0_f32
-# GCN: %1:vgpr_32 = V_MADMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit $exec
+# GCN: %1:vgpr_32 = V_MADMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit $mode, implicit $exec
 
 ---
 name:            test_madak_inlineimm_src0_f32
@@ -146,13 +146,13 @@ body:             |
   bb.0:
 
     %0 = V_MOV_B32_e32 1078523331, implicit $exec
-    %1 = V_MAC_F32_e32 1073741824, %0, %2, implicit $exec
+    %1 = V_MAC_F32_e32 1073741824, %0, %2, implicit $mode, implicit $exec
 
 ...
 # Non-inline immediate uses constant bus already.
 
 # GCN-LABEL: name: test_madak_otherimm_src0_f32
-# GCN: %1:vgpr_32 = V_MAC_F32_e32 1120403456, %0, %1, implicit $exec
+# GCN: %1:vgpr_32 = V_MAC_F32_e32 1120403456, %0, %1, implicit $mode, implicit $exec
 
 ---
 name:            test_madak_otherimm_src0_f32
@@ -164,13 +164,13 @@ body:             |
   bb.0:
 
     %0 = V_MOV_B32_e32 1078523331, implicit $exec
-    %1 = V_MAC_F32_e32 1120403456, %0, %2, implicit $exec
+    %1 = V_MAC_F32_e32 1120403456, %0, %2, implicit $mode, implicit $exec
 
 ...
 # Non-inline immediate uses constant bus already.
 
 # GCN-LABEL: name: test_madak_other_constantlike_src0_f32
-# GCN: %1:vgpr_32 = V_MAC_F32_e32 %stack.0, %0, %1, implicit $exec
+# GCN: %1:vgpr_32 = V_MAC_F32_e32 %stack.0, %0, %1, implicit $mode, implicit $exec
 ---
 name:            test_madak_other_constantlike_src0_f32
 registers:
@@ -185,12 +185,12 @@ body:             |
   bb.0:
 
     %0 = V_MOV_B32_e32 1078523331, implicit $exec
-    %1 = V_MAC_F32_e32 %stack.0, %0, %2, implicit $exec
+    %1 = V_MAC_F32_e32 %stack.0, %0, %2, implicit $mode, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madak_inline_literal_f16
-# GCN: %2:vgpr_32 = V_MADAK_F16 16384, killed %0, 49664, implicit $exec
+# GCN: %2:vgpr_32 = V_MADAK_F16 16384, killed %0, 49664, implicit $mode, implicit $exec
 
 ---
 name:            test_madak_inline_literal_f16
@@ -199,11 +199,11 @@ liveins:
 body:             |
   bb.0:
     liveins: $vgpr0
-  
+
     %3:vgpr_32 = COPY killed $vgpr0
 
     %26:vgpr_32 = V_MOV_B32_e32 49664, implicit $exec
-    %28:vgpr_32 = V_MAC_F16_e32 16384, killed %3, %26, implicit $exec
+    %28:vgpr_32 = V_MAC_F16_e32 16384, killed %3, %26, implicit $mode, implicit $exec
     S_ENDPGM 0
 
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir b/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
index 9f36e0b5d685..3190641ae691 100644
--- a/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
+++ b/llvm/test/CodeGen/AMDGPU/v_swap_b32.mir
@@ -66,7 +66,7 @@ body:             |
 # GCN-LABEL: name: swap_phys_overlap_x
 # GCN: bb.0:
 # GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
-# GCN-NEXT: $vgpr3_vgpr4 = V_ADD_F64 0, $vgpr0_vgpr1, 0, $vgpr3_vgpr4, 0, 0, implicit $exec
+# GCN-NEXT: $vgpr3_vgpr4 = V_ADD_F64 0, $vgpr0_vgpr1, 0, $vgpr3_vgpr4, 0, 0, implicit $mode, implicit $exec
 # GCN-NEXT: $vgpr0 = V_MOV_B32_e32 killed $vgpr1, implicit $exec
 # GCN-NEXT: $vgpr1 = V_MOV_B32_e32 killed $vgpr2, implicit $exec
 ---
@@ -74,7 +74,7 @@ name:            swap_phys_overlap_x
 body:             |
   bb.0:
     $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
-    $vgpr3_vgpr4 = V_ADD_F64 0, $vgpr0_vgpr1, 0, $vgpr3_vgpr4, 0, 0, implicit $exec
+    $vgpr3_vgpr4 = V_ADD_F64 0, $vgpr0_vgpr1, 0, $vgpr3_vgpr4, 0, 0, implicit $mode, implicit $exec
     $vgpr0 = V_MOV_B32_e32 killed $vgpr1, implicit $exec
     $vgpr1 = V_MOV_B32_e32 killed $vgpr2, implicit $exec
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index 20c21aae6c75..55453c7d263a 100644
--- a/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -17,7 +17,7 @@ body: |
     $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0, 0
     $sgpr7 = S_MOV_B32 61440
     $sgpr6 = S_MOV_B32 -1
-    $vcc = V_CMP_EQ_F32_e64 0, 0, 0, undef $sgpr2, 0, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, 0, 0, undef $sgpr2, 0, implicit $mode, implicit $exec
     S_CBRANCH_VCCZ %bb.1, implicit killed $vcc
 
   bb.2:

diff  --git a/llvm/test/CodeGen/AMDGPU/vcmpx-permlane-hazard.mir b/llvm/test/CodeGen/AMDGPU/vcmpx-permlane-hazard.mir
index 5b98a82216d7..96b6eaf1967d 100644
--- a/llvm/test/CodeGen/AMDGPU/vcmpx-permlane-hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/vcmpx-permlane-hazard.mir
@@ -117,7 +117,7 @@ body:            |
     $vgpr1 = IMPLICIT_DEF
     $sgpr0 = IMPLICIT_DEF
     $sgpr1 = IMPLICIT_DEF
-    $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1,  implicit $exec
+    $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1,  implicit $mode, implicit $exec
     $vgpr1 = V_PERMLANE16_B32 0, killed $vgpr1, 0, killed $sgpr1, 0, killed $sgpr0, $vgpr1, 0, implicit $exec
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/vmem-to-salu-hazard.mir b/llvm/test/CodeGen/AMDGPU/vmem-to-salu-hazard.mir
index 1f30d91753a5..761ef6054b81 100644
--- a/llvm/test/CodeGen/AMDGPU/vmem-to-salu-hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/vmem-to-salu-hazard.mir
@@ -94,7 +94,7 @@ body:             |
     $sgpr4 = IMPLICIT_DEF
     $vgpr0 = IMPLICIT_DEF
     $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, 0, 0, implicit $exec
-    $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $exec
+    $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
     $sgpr0 = S_MOV_B32 0
 ...
 # GCN-LABEL: name: vmem_swait0_write_sgpr

diff  --git a/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
index e3b56d3f9619..0e6aecb7ad70 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir
@@ -3,7 +3,7 @@
 # GCN-LABEL: waitcnt-back-edge-loop
 # GCN: bb.2
 # GCN: S_WAITCNT 112
-# GCN: $vgpr5 = V_CVT_I32_F32_e32 killed $vgpr5, implicit $exec
+# GCN: $vgpr5 = V_CVT_I32_F32_e32 killed $vgpr5, implicit $mode, implicit $exec
 
 ---
 name: waitcnt-back-edge-loop
@@ -28,7 +28,7 @@ body:             |
   bb.1:
     successors: %bb.5, %bb.2
 
-    $vgpr5 = V_CVT_I32_F32_e32 killed $vgpr5, implicit $exec
+    $vgpr5 = V_CVT_I32_F32_e32 killed $vgpr5, implicit $mode, implicit $exec
     V_CMP_NE_U32_e32 0, $vgpr5, implicit-def $vcc, implicit $exec
     $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
     S_CBRANCH_VCCZ %bb.5, implicit killed $vcc
@@ -44,7 +44,7 @@ body:             |
     successors: %bb.3, %bb.1
 
     $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4 from `float addrspace(1)* null`, addrspace 1)
-    $vgpr4 = V_CVT_I32_F32_e32 $vgpr5, implicit $exec
+    $vgpr4 = V_CVT_I32_F32_e32 $vgpr5, implicit $mode, implicit $exec
     V_CMP_EQ_U32_e32 2, killed $vgpr4, implicit-def $vcc, implicit $exec
     $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc
     $vgpr4 = V_MOV_B32_e32 $vgpr5, implicit $exec, implicit $exec
@@ -53,7 +53,7 @@ body:             |
 
   bb.5:
 
-    $vgpr4 = V_MAC_F32_e32 killed $vgpr0, killed $vgpr3, killed $vgpr4, implicit $exec
+    $vgpr4 = V_MAC_F32_e32 killed $vgpr0, killed $vgpr3, killed $vgpr4, implicit $mode, implicit $exec
     EXP_DONE 12, killed $vgpr4, undef $vgpr0, undef $vgpr0, undef $vgpr0, 0, 0, 15, implicit $exec
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/waitcnt-overflow.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-overflow.mir
index eae38031047f..e06e3031c3d2 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-overflow.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-overflow.mir
@@ -16,21 +16,21 @@ body:             |
     ; GFX9: $vgpr34_vgpr35 = DS_READ2_B32_gfx9 renamable $vgpr99, 34, 35, 0, implicit $exec
     ; GFX9-NOT: S_WAITCNT 53119
     ; GFX9-NEXT: S_WAITCNT 52863
-    ; GFX9-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $exec
-    ; GFX9-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $exec
-    ; GFX9-NEXT: $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $exec
-    ; GFX9-NEXT: $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $exec
+    ; GFX9-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
+    ; GFX9-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
+    ; GFX9-NEXT: $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $mode, implicit $exec
+    ; GFX9-NEXT: $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $mode, implicit $exec
     ; GFX9-NEXT: S_ENDPGM 0
     ; GFX10-LABEL: name: max-counter-lgkmcnt
     ; GFX10: $vgpr34_vgpr35 = DS_READ2_B32_gfx9 renamable $vgpr99, 34, 35, 0, implicit $exec
     ; GFX10-NEXT: S_WAITCNT 53631
-    ; GFX10-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $exec
+    ; GFX10-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
     ; GFX10-NEXT: S_WAITCNT 53375
-    ; GFX10-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $exec
+    ; GFX10-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
     ; GFX10-NEXT: S_WAITCNT 53119
-    ; GFX10-NEXT: $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $exec
+    ; GFX10-NEXT: $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $mode, implicit $exec
     ; GFX10-NEXT: S_WAITCNT 52863
-    ; GFX10-NEXT: $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $exec
+    ; GFX10-NEXT: $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $mode, implicit $exec
     ; GFX10-NEXT: S_ENDPGM 0
     $vgpr0_vgpr1 = DS_READ2_B32_gfx9 renamable $vgpr99, 0, 1, 0, implicit $exec
     $vgpr2_vgpr3 = DS_READ2_B32_gfx9 renamable $vgpr99, 2, 3, 0, implicit $exec
@@ -50,10 +50,10 @@ body:             |
     $vgpr30_vgpr31 = DS_READ2_B32_gfx9 renamable $vgpr99, 30, 31, 0, implicit $exec
     $vgpr32_vgpr33 = DS_READ2_B32_gfx9 renamable $vgpr99, 32, 33, 0, implicit $exec
     $vgpr34_vgpr35 = DS_READ2_B32_gfx9 renamable $vgpr99, 34, 35, 0, implicit $exec
-    $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $exec
-    $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $exec
-    $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $exec
-    $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $exec
+    $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
+    $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
+    $vgpr4 = V_MAC_F32_e32 0, $vgpr5, $vgpr4, implicit $mode, implicit $exec
+    $vgpr6 = V_MAC_F32_e32 0, $vgpr7, $vgpr6, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -70,10 +70,10 @@ body:             |
     ; GFX10-NOT: S_WAITCNT 65407
     ; GFX9-NEXT: S_WAITCNT 53118
     ; GFX10-NEXT: S_WAITCNT 65406
-    ; GFX9_10-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $exec
-    ; GFX9_10-NEXT: $vgpr1 = V_MAC_F32_e32 0, $vgpr2, $vgpr1, implicit $exec
-    ; GFX9_10-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $exec
-    ; GFX9_10-NEXT: $vgpr3 = V_MAC_F32_e32 0, $vgpr4, $vgpr3, implicit $exec
+    ; GFX9_10-NEXT: $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
+    ; GFX9_10-NEXT: $vgpr1 = V_MAC_F32_e32 0, $vgpr2, $vgpr1, implicit $mode, implicit $exec
+    ; GFX9_10-NEXT: $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
+    ; GFX9_10-NEXT: $vgpr3 = V_MAC_F32_e32 0, $vgpr4, $vgpr3, implicit $mode, implicit $exec
     ; GFX9_10-NEXT: S_ENDPGM 0
     $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, 0, 0, implicit $exec
     $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, 0, 0, 0, implicit $exec
@@ -142,10 +142,10 @@ body:             |
     $vgpr64 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 256, 0, 0, 0, 0, 0, implicit $exec
     $vgpr65 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 260, 0, 0, 0, 0, 0, implicit $exec
     $vgpr66 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 264, 0, 0, 0, 0, 0, implicit $exec
-    $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $exec
-    $vgpr1 = V_MAC_F32_e32 0, $vgpr2, $vgpr1, implicit $exec
-    $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $exec
-    $vgpr3 = V_MAC_F32_e32 0, $vgpr4, $vgpr3, implicit $exec
+    $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
+    $vgpr1 = V_MAC_F32_e32 0, $vgpr2, $vgpr1, implicit $mode, implicit $exec
+    $vgpr2 = V_MAC_F32_e32 0, $vgpr3, $vgpr2, implicit $mode, implicit $exec
+    $vgpr3 = V_MAC_F32_e32 0, $vgpr4, $vgpr3, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...
 
@@ -167,6 +167,6 @@ body:             |
     EXP 0, $vgpr1, $vgpr1, $vgpr1, $vgpr1, -1, -1, 15, implicit $exec
     EXP 0, $vgpr1, $vgpr1, $vgpr1, $vgpr1, -1, -1, 15, implicit $exec
     EXP 0, $vgpr1, $vgpr1, $vgpr1, $vgpr1, -1, -1, 15, implicit $exec
-    $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $exec
+    $vgpr0 = V_MAC_F32_e32 0, $vgpr1, $vgpr0, implicit $mode, implicit $exec
     S_ENDPGM 0
 ...

diff  --git a/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
index 7a70ae5464dd..2f453c6156b2 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
@@ -15,7 +15,7 @@ body:             |
     liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
 
     $vgpr0 = DS_BPERMUTE_B32 killed $vgpr0, killed $vgpr1, 0, implicit $exec
-    $vgpr0 = V_ADD_F32_e32 1065353216, killed $vgpr0, implicit $exec
+    $vgpr0 = V_ADD_F32_e32 1065353216, killed $vgpr0, implicit $mode, implicit $exec
     S_SETPC_B64_return killed $sgpr30_sgpr31, implicit killed $vgpr0
 
 ...

diff  --git a/llvm/unittests/MI/LiveIntervalTest.cpp b/llvm/unittests/MI/LiveIntervalTest.cpp
index 6faa8abd4cd8..ea8476db1e65 100644
--- a/llvm/unittests/MI/LiveIntervalTest.cpp
+++ b/llvm/unittests/MI/LiveIntervalTest.cpp
@@ -432,12 +432,12 @@ TEST(LiveIntervalTest, DeadSubRegMoveUp) {
     %54:vgpr_32 = V_MOV_B32_e32 1742342378, implicit $exec
     %57:vgpr_32 = V_MOV_B32_e32 3168768712, implicit $exec
     %59:vgpr_32 = V_MOV_B32_e32 1039972644, implicit $exec
-    %60:vgpr_32 = V_MAD_F32 0, %52, 0, undef %61:vgpr_32, 0, %59, 0, 0, implicit $exec
-    %63:vgpr_32 = V_ADD_F32_e32 %51.sub3, undef %64:vgpr_32, implicit $exec
-    dead %66:vgpr_32 = V_MAD_F32 0, %60, 0, undef %67:vgpr_32, 0, %125.sub2, 0, 0, implicit $exec
-    undef %124.sub1:vreg_128 = V_MAD_F32 0, %57, 0, undef %70:vgpr_32, 0, %125.sub1, 0, 0, implicit $exec
-    %124.sub0:vreg_128 = V_MAD_F32 0, %54, 0, undef %73:vgpr_32, 0, %125.sub0, 0, 0, implicit $exec
-    dead undef %125.sub3:vreg_128 = V_MAC_F32_e32 %63, undef %76:vgpr_32, %125.sub3, implicit $exec
+    %60:vgpr_32 = nofpexcept V_MAD_F32 0, %52, 0, undef %61:vgpr_32, 0, %59, 0, 0, implicit $mode, implicit $exec
+    %63:vgpr_32 = nofpexcept V_ADD_F32_e32 %51.sub3, undef %64:vgpr_32, implicit $mode, implicit $exec
+    dead %66:vgpr_32 = nofpexcept V_MAD_F32 0, %60, 0, undef %67:vgpr_32, 0, %125.sub2, 0, 0, implicit $mode, implicit $exec
+    undef %124.sub1:vreg_128 = nofpexcept V_MAD_F32 0, %57, 0, undef %70:vgpr_32, 0, %125.sub1, 0, 0, implicit $mode, implicit $exec
+    %124.sub0:vreg_128 = nofpexcept V_MAD_F32 0, %54, 0, undef %73:vgpr_32, 0, %125.sub0, 0, 0, implicit $mode, implicit $exec
+    dead undef %125.sub3:vreg_128 = nofpexcept V_MAC_F32_e32 %63, undef %76:vgpr_32, %125.sub3, implicit $mode, implicit $exec
 )MIR", [](MachineFunction &MF, LiveIntervals &LIS) {
     testHandleMove(MF, LIS, 15, 12);
   });


        


More information about the llvm-commits mailing list