[llvm] 3b5d4aa - GlobalISel: Infer nofpexcept flag during selection for non-strict ops

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 5 10:59:54 PDT 2020


Author: Matt Arsenault
Date: 2020-06-05T13:59:46-04:00
New Revision: 3b5d4aa258a0f7ccbd5c3ade4286dca8f5d2d984

URL: https://github.com/llvm/llvm-project/commit/3b5d4aa258a0f7ccbd5c3ade4286dca8f5d2d984
DIFF: https://github.com/llvm/llvm-project/commit/3b5d4aa258a0f7ccbd5c3ade4286dca8f5d2d984.diff

LOG: GlobalISel: Infer nofpexcept flag during selection for non-strict ops

Match SelectionDAG's behavior of adding nofpexcept to output instructions
that may raise FP exceptions when they are selected from instructions that
cannot.
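
For reference, the inference amounts to one extra check in the match-table
executor: the flags copied from the matched instruction are augmented with
NoFPExcept whenever the source opcode's MCInstrDesc cannot raise an FP
exception but the selected output instruction can. A minimal standalone
sketch of that logic (the helper name inferOutputFlags is illustrative and
not part of the patch; the real change lives in propagateFlags below):

  #include "llvm/CodeGen/MachineInstr.h"

  // Compute the MI flags to place on a newly selected instruction.
  // SrcMI is the matched generic instruction, OutMI the selected output.
  static uint16_t inferOutputFlags(const llvm::MachineInstr &SrcMI,
                                   const llvm::MachineInstr &OutMI) {
    uint16_t Flags = SrcMI.getFlags();
    // Only consult the opcode description of the source; its per-instruction
    // flags are bypassed for this purpose.
    bool NoFPException = !SrcMI.getDesc().mayRaiseFPException();
    // If the source could not raise an FP exception but the selected output
    // instruction potentially might, mark the output as not raising any.
    if (NoFPException && OutMI.mayRaiseFPException())
      Flags |= llvm::MachineInstr::NoFPExcept;
    return Flags;
  }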

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
    llvm/test/CodeGen/X86/GlobalISel/select-add.mir
    llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-fptrunc-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
    llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-select-fptosi.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
index cb48122047f1..73ac578d61be 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h
@@ -57,7 +57,11 @@ bool InstructionSelector::executeMatchTable(
 
   uint64_t CurrentIdx = 0;
   SmallVector<uint64_t, 4> OnFailResumeAt;
-  uint16_t Flags = State.MIs[0]->getFlags();
+
+  // Bypass the flag check on the instruction, and only look at the MCInstrDesc.
+  bool NoFPException = !State.MIs[0]->getDesc().mayRaiseFPException();
+
+  const uint16_t Flags = State.MIs[0]->getFlags();
 
   enum RejectAction { RejectAndGiveUp, RejectAndResume };
   auto handleReject = [&]() -> RejectAction {
@@ -72,11 +76,15 @@ bool InstructionSelector::executeMatchTable(
     return RejectAndResume;
   };
 
-  auto propagateFlags = [&](NewMIVector &OutMIs) {
-    if (Flags == MachineInstr::MIFlag::NoFlags)
-      return false;
-    for (auto MIB : OutMIs)
-      MIB.setMIFlags(Flags);
+  auto propagateFlags = [=](NewMIVector &OutMIs) {
+    for (auto MIB : OutMIs) {
+      // Set the NoFPExcept flag when no original matched instruction could
+      // raise an FP exception, but the new instruction potentially might.
+      uint16_t MIBFlags = Flags;
+      if (NoFPException && MIB->mayRaiseFPException())
+        MIBFlags |= MachineInstr::NoFPExcept;
+      MIB.setMIFlags(MIBFlags);
+    }
 
     return true;
   };

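The effect shows up throughout the test updates below: output instructions
whose opcodes may raise FP exceptions (V_COS_F32_e64, V_ADD_F32_e64, and so
on) now carry the nofpexcept flag when selected from generic instructions
that cannot. Schematically, the FileCheck lines change as in this excerpt
from the cos test that follows:

    ; before
    %1:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; after
    %1:vgpr_32 = nofpexcept V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
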
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
index d010f7f44160..1f6d80a4a724 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.mir
@@ -15,8 +15,8 @@ body: |
     ; CHECK-LABEL: name: cos_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_COS_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %0
     S_ENDPGM 0, implicit %1
@@ -35,8 +35,8 @@ body: |
     ; CHECK-LABEL: name: cos_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_COS_F32_e64_:%[0-9]+]]:vgpr_32 = V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_COS_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_COS_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
index 86b782b27e82..2cf44526e30d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cos.s16.mir
@@ -5,6 +5,7 @@
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %1:sgpr(s16) (in function: cos_s16_vs)
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %1:vgpr(s16) (in function: cos_s16_vv)
 
+---
 name: cos_s16_vs
 legalized: true
 regBankSelected: true
@@ -17,7 +18,7 @@ body: |
     ; CHECK-LABEL: name: cos_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_COS_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,8 +39,8 @@ body: |
     ; CHECK-LABEL: name: cos_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_COS_F16_e64_:%[0-9]+]]:vgpr_32 = V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_COS_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_COS_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
index 2eef865fc85b..3a2f326ad7fd 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
@@ -15,8 +15,8 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1
@@ -37,8 +37,8 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1
@@ -58,8 +58,8 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_CVT_PKRTZ_F16_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_CVT_PKRTZ_F16_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_PKRTZ_F16_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
index 7ffb25190770..09b22465e8b5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fmed3.s16.mir
@@ -21,8 +21,8 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_MED3_F16_]]
+    ; GCN: %6:vgpr_32 = nofpexcept V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %6
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2
@@ -48,8 +48,8 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_MED3_F16_:%[0-9]+]]:vgpr_32 = V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_MED3_F16_]]
+    ; GCN: %6:vgpr_32 = nofpexcept V_MED3_F16 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %6
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
index 8d62c2c49049..7498ee0053ee 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.mir
@@ -15,8 +15,8 @@ body: |
     ; CHECK-LABEL: name: fract_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
     S_ENDPGM 0, implicit %1
@@ -35,8 +35,8 @@ body: |
     ; CHECK-LABEL: name: fract_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FRACT_F32_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_FRACT_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
     S_ENDPGM 0, implicit %1
@@ -55,8 +55,8 @@ body: |
     ; CHECK-LABEL: name: fract_s64_vs
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
     S_ENDPGM 0, implicit %1
@@ -75,8 +75,8 @@ body: |
     ; CHECK-LABEL: name: fract_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_FRACT_F64_e64_:%[0-9]+]]:vreg_64 = V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_FRACT_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
index 8360aee9a83e..7a3af8226074 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.fract.s16.mir
@@ -19,8 +19,8 @@ body: |
     ; CHECK-LABEL: name: fract_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %1
@@ -40,8 +40,8 @@ body: |
     ; CHECK-LABEL: name: fract_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FRACT_F16_e64_:%[0-9]+]]:vgpr_32 = V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_FRACT_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_FRACT_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
index 0fc2582f983f..5e2eb2185954 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.mir
@@ -14,8 +14,8 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
@@ -35,8 +35,8 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
@@ -56,8 +56,8 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_LDEXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_LDEXP_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
@@ -77,8 +77,8 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
+    ; GCN: %2:vreg_64 = nofpexcept V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
@@ -98,8 +98,8 @@ body: |
     ; GCN: liveins: $sgpr0_sgpr1, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
+    ; GCN: %2:vreg_64 = nofpexcept V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1
@@ -119,8 +119,8 @@ body: |
     ; GCN: liveins: $vgpr0_vgpr1, $vgpr2
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GCN: [[V_LDEXP_F64_:%[0-9]+]]:vreg_64 = V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F64_]]
+    ; GCN: %2:vreg_64 = nofpexcept V_LDEXP_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.ldexp), %0, %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
index 1a620849f336..24d35a7887e5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.ldexp.s16.mir
@@ -19,8 +19,8 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %3
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:sgpr(s16) = G_TRUNC %0
@@ -41,8 +41,8 @@ body: |
     ; GCN: liveins: $sgpr0, $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s16) = G_TRUNC %0
@@ -63,8 +63,8 @@ body: |
     ; GCN: liveins: $vgpr0, $vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GCN: [[V_LDEXP_F16_e64_:%[0-9]+]]:vgpr_32 = V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_LDEXP_F16_e64_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_LDEXP_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
index 3dbed8a7b5dd..03402025c8f3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.legacy.mir
@@ -20,8 +20,8 @@ body: |
     ; CHECK-LABEL: name: rcp_legacy_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_LEGACY_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp.legacy), %0
     S_ENDPGM 0, implicit %1
@@ -40,8 +40,8 @@ body: |
     ; CHECK-LABEL: name: rcp_legacy_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RCP_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_LEGACY_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RCP_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp.legacy), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
index fce886429408..5bfde382325d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.mir
@@ -15,8 +15,8 @@ body: |
     ; CHECK-LABEL: name: rcp_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
     S_ENDPGM 0, implicit %1
@@ -35,8 +35,8 @@ body: |
     ; CHECK-LABEL: name: rcp_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RCP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RCP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
     S_ENDPGM 0, implicit %1
@@ -55,8 +55,8 @@ body: |
     ; CHECK-LABEL: name: rcp_s64_vs
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
     S_ENDPGM 0, implicit %1
@@ -75,8 +75,8 @@ body: |
     ; CHECK-LABEL: name: rcp_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_RCP_F64_e64_:%[0-9]+]]:vreg_64 = V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_RCP_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
index c69890ae5c85..c4cec17cc725 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rcp.s16.mir
@@ -18,8 +18,8 @@ body: |
     ; CHECK-LABEL: name: rcp_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %1
@@ -39,8 +39,8 @@ body: |
     ; CHECK-LABEL: name: rcp_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RCP_F16_e64_:%[0-9]+]]:vgpr_32 = V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RCP_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_RCP_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
index 4b78bf341b52..cb8f47b1f150 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.clamp.mir
@@ -20,8 +20,8 @@ body: |
     ; CHECK-LABEL: name: rsq_clamp_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_CLAMP_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), %0
     S_ENDPGM 0, implicit %1
@@ -40,8 +40,8 @@ body: |
     ; CHECK-LABEL: name: rsq_clamp_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_CLAMP_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_CLAMP_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RSQ_CLAMP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.clamp), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
index 2c129b08c651..0b952a17dadf 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.legacy.mir
@@ -20,8 +20,8 @@ body: |
     ; CHECK-LABEL: name: rsq_legacy_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_LEGACY_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.legacy), %0
     S_ENDPGM 0, implicit %1
@@ -40,8 +40,8 @@ body: |
     ; CHECK-LABEL: name: rsq_legacy_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_LEGACY_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_LEGACY_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RSQ_LEGACY_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq.legacy), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
index 4cf3fc5a9b42..b2ac287717e1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.mir
@@ -15,8 +15,8 @@ body: |
     ; CHECK-LABEL: name: rsq_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
     S_ENDPGM 0, implicit %1
@@ -35,8 +35,8 @@ body: |
     ; CHECK-LABEL: name: rsq_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_F32_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_RSQ_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
     S_ENDPGM 0, implicit %1
@@ -55,8 +55,8 @@ body: |
     ; CHECK-LABEL: name: rsq_s64_vs
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
     S_ENDPGM 0, implicit %1
@@ -75,8 +75,8 @@ body: |
     ; CHECK-LABEL: name: rsq_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_RSQ_F64_e64_:%[0-9]+]]:vreg_64 = V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_RSQ_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
index 8620efd9fdce..af5ed0ac9613 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.rsq.s16.mir
@@ -5,6 +5,7 @@
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %1:sgpr(s16) (in function: rsq_s16_vs)
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %1:vgpr(s16) (in function: rsq_s16_vv)
 
+---
 name: rsq_s16_vs
 legalized: true
 regBankSelected: true
@@ -17,8 +18,8 @@ body: |
     ; CHECK-LABEL: name: rsq_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %1
@@ -38,8 +39,8 @@ body: |
     ; CHECK-LABEL: name: rsq_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_RSQ_F16_e64_:%[0-9]+]]:vgpr_32 = V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_RSQ_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_RSQ_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.rsq), %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
index 90e586c6888b..bbe63fbe7c1f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.mir
@@ -15,8 +15,8 @@ body: |
     ; CHECK-LABEL: name: sin_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %0
     S_ENDPGM 0, implicit %1
@@ -35,8 +35,8 @@ body: |
     ; CHECK-LABEL: name: sin_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_SIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_SIN_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %0
     S_ENDPGM 0, implicit %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
index 903ee4a9a040..c0ac731d9769 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.sin.s16.mir
@@ -5,6 +5,7 @@
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %1:sgpr(s16) (in function: sin_s16_vs)
 # SI-ERR: remark: <unknown>:0:0: cannot select: %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %1:vgpr(s16) (in function: sin_s16_vv)
 
+---
 name: sin_s16_vs
 legalized: true
 regBankSelected: true
@@ -17,7 +18,7 @@ body: |
     ; CHECK-LABEL: name: sin_s16_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
     ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F16_e64_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
@@ -38,8 +39,8 @@ body: |
     ; CHECK-LABEL: name: sin_s16_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_SIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_SIN_F16_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_SIN_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
index ed510864f3bb..01e5cc00b6a7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s16.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %4:vgpr_32 = nofpexcept V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -36,8 +36,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vsv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %4:vgpr_32 = nofpexcept V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %4
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:sgpr(s16) = G_TRUNC %0
@@ -59,8 +59,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %4:vgpr_32 = nofpexcept V_ADD_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s16) = G_TRUNC %0
@@ -82,8 +82,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fabs_lhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %5:vgpr_32 = nofpexcept V_ADD_F16_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -106,8 +106,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fabs_rhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 2, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %5:vgpr_32 = nofpexcept V_ADD_F16_e64 0, [[COPY]], 2, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -130,8 +130,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fneg_fabs_lhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %6:vgpr_32 = nofpexcept V_ADD_F16_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %6
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -155,8 +155,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_vvv_fneg_fabs_rhs
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %6:vgpr_32 = nofpexcept V_ADD_F16_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %6
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -180,8 +180,8 @@ body: |
     ; GFX8-LABEL: name: fadd_s16_fneg_copy_sgpr
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX8: [[V_ADD_F16_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_ADD_F16_e64_]]
+    ; GFX8: %5:vgpr_32 = nofpexcept V_ADD_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
index 65482f3cee96..d33dc1193ab3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s32.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvv
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %2:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = G_FADD %0, %1
@@ -34,8 +34,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vsv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %2:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = G_FADD %0, %1
@@ -55,8 +55,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %2:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s32) = G_FADD %0, %1
@@ -76,8 +76,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvv_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %3:vgpr_32 = nofpexcept V_ADD_F32_e64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = G_FABS %0
@@ -97,8 +97,8 @@ body: |
     liveins: $vgpr0, $vgpr1
     ; GFX6-LABEL: name: fadd_s32_vvv_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %3:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = G_FABS %1
@@ -119,8 +119,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_vvv_fneg_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_ADD_F32_e64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = G_FABS %0
@@ -141,8 +141,8 @@ body: |
     liveins: $vgpr0, $vgpr1
     ; GFX6-LABEL: name: fadd_s32_vvv_fneg_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = G_FABS %1
@@ -166,8 +166,8 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:sgpr(s32) = G_FNEG %1
@@ -191,8 +191,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s32_copy_fneg_copy_fabs
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %6:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY]], 3, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %6
     %0:vgpr(s32) = COPY $vgpr0
     %1:sgpr(s32) = COPY $sgpr0
     %2:sgpr(s32) = G_FABS %1
@@ -222,8 +222,8 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %6:vgpr_32 = nofpexcept V_ADD_F32_e64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %6
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
     %2:sgpr(s32) = G_FABS %0
@@ -249,8 +249,8 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %6:vgpr_32 = nofpexcept V_ADD_F32_e64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %6
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
     %2:sgpr(s32) = G_FNEG %0
@@ -276,8 +276,8 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_F32_e64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F32_e64_]]
+    ; GFX6: %8:vgpr_32 = nofpexcept V_ADD_F32_e64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %8
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = COPY $sgpr1
     %2:sgpr(s32) = G_FABS %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
index b4b9e2ce1385..5ccba914f5fa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fadd.s64.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %2:vreg_64 = nofpexcept V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vgpr(s64) = G_FADD %0, %1
@@ -34,8 +34,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vsv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %2:vreg_64 = nofpexcept V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %2
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = COPY $vgpr0_vgpr1
     %2:vgpr(s64) = G_FADD %0, %1
@@ -55,8 +55,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %2:vreg_64 = nofpexcept V_ADD_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s64) = COPY $sgpr0_sgpr1
     %2:vgpr(s64) = G_FADD %0, %1
@@ -76,8 +76,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvv_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %3:vreg_64 = nofpexcept V_ADD_F64 2, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %3
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vgpr(s64) = G_FABS %0
@@ -97,8 +97,8 @@ body: |
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
     ; GFX6-LABEL: name: fadd_s64_vvv_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %3:vreg_64 = nofpexcept V_ADD_F64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %3
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vgpr(s64) = G_FABS %1
@@ -119,8 +119,8 @@ body: |
     ; GFX6-LABEL: name: fadd_s64_vvv_fneg_fabs_lhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %4:vreg_64 = nofpexcept V_ADD_F64 3, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vgpr(s64) = G_FABS %0
@@ -141,8 +141,8 @@ body: |
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
     ; GFX6-LABEL: name: fadd_s64_vvv_fneg_fabs_rhs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %4:vreg_64 = nofpexcept V_ADD_F64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr0_vgpr1
     %2:vgpr(s64) = G_FABS %1
@@ -167,8 +167,8 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %4:vreg_64 = nofpexcept V_ADD_F64 0, [[COPY]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s64) = COPY $sgpr0_sgpr1
     %2:sgpr(s64) = G_FNEG %1
@@ -196,8 +196,8 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %6:vreg_64 = nofpexcept V_ADD_F64 2, [[COPY2]], 2, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %6
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
     %2:sgpr(s64) = G_FABS %0
@@ -223,8 +223,8 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %6:vreg_64 = nofpexcept V_ADD_F64 1, [[COPY2]], 1, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %6
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
     %2:sgpr(s64) = G_FNEG %0
@@ -250,8 +250,8 @@ body: |
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_64 = COPY $sgpr2_sgpr3
     ; GFX6: [[COPY2:%[0-9]+]]:vreg_64 = COPY [[COPY]]
     ; GFX6: [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY1]]
-    ; GFX6: [[V_ADD_F64_:%[0-9]+]]:vreg_64 = V_ADD_F64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ADD_F64_]]
+    ; GFX6: %8:vreg_64 = nofpexcept V_ADD_F64 3, [[COPY2]], 3, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %8
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:sgpr(s64) = COPY $sgpr2_sgpr3
     %2:sgpr(s64) = G_FABS %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
index 7bf63ebfa1fa..0f30508864f9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcanonicalize.mir
@@ -17,12 +17,12 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_f16_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; GFX8: %2:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %2
     ; GFX9-LABEL: name: fcanonicalize_f16_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FCANONICALIZE %1
@@ -44,12 +44,12 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_f16_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, 15360, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F16_e64_]]
+    ; GFX8: %2:vgpr_32 = nofpexcept V_MUL_F16_e64 0, 15360, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %2
     ; GFX9-LABEL: name: fcanonicalize_f16_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FCANONICALIZE %1
@@ -72,12 +72,12 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f32_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %1:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %1
     ; GFX9-LABEL: name: fcanonicalize_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %1:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FCANONICALIZE %0
     S_ENDPGM 0, implicit %1
@@ -99,12 +99,12 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f32_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %1:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1065353216, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %1
     ; GFX9-LABEL: name: fcanonicalize_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %1:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FCANONICALIZE %0
     S_ENDPGM 0, implicit %1
@@ -126,12 +126,12 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_v2f16_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
+    ; GFX8: %1:vgpr_32 = nofpexcept V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %1
     ; GFX9-LABEL: name: fcanonicalize_v2f16_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
+    ; GFX9: %1:vgpr_32 = nofpexcept V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %1
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = G_FCANONICALIZE %0
     S_ENDPGM 0, implicit %1
@@ -153,12 +153,12 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_v2f16_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 0, 15360, 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
+    ; GFX8: %1:vgpr_32 = nofpexcept V_PK_MUL_F16 0, 15360, 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %1
     ; GFX9-LABEL: name: fcanonicalize_v2f16_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
+    ; GFX9: %1:vgpr_32 = nofpexcept V_PK_MAX_F16 8, [[COPY]], 8, [[COPY]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %1
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = G_FCANONICALIZE %0
     S_ENDPGM 0, implicit %1
@@ -180,12 +180,12 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f64_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX8: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MAX_F64_]]
+    ; GFX8: %1:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %1
     ; GFX9-LABEL: name: fcanonicalize_f64_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX9: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F64_]]
+    ; GFX9: %1:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FCANONICALIZE %0
     S_ENDPGM 0, implicit %1
@@ -207,12 +207,12 @@ body: |
 
     ; GFX8-LABEL: name: fcanonicalize_f64_flush
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX8: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, 4607182418800017408, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F64_]]
+    ; GFX8: %1:vreg_64 = nofpexcept V_MUL_F64 0, 4607182418800017408, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %1
     ; GFX9-LABEL: name: fcanonicalize_f64_flush
     ; GFX9: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GFX9: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F64_]]
+    ; GFX9: %1:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FCANONICALIZE %0
     S_ENDPGM 0, implicit %1
@@ -233,12 +233,12 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fabs_f32_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %2:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %2
     ; GFX9-LABEL: name: fcanonicalize_fabs_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FABS %0
     %2:vgpr(s32) = G_FCANONICALIZE %1
@@ -261,12 +261,12 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fabs_f32_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %2:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1065353216, 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %2
     ; GFX9-LABEL: name: fcanonicalize_fabs_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_MAX_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FABS %0
     %2:vgpr(s32) = G_FCANONICALIZE %1
@@ -288,12 +288,12 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fneg_f32_denorm
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %2:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %2
     ; GFX9-LABEL: name: fcanonicalize_fneg_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FCANONICALIZE %1
@@ -315,12 +315,12 @@ body: |
     liveins: $vgpr0
     ; GFX8-LABEL: name: fcanonicalize_fneg_f32_flush
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %2:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 3212836864, 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %2
     ; GFX9-LABEL: name: fcanonicalize_fneg_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_MAX_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FCANONICALIZE %1
@@ -344,14 +344,14 @@ body: |
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX8: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %3:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %3
     ; GFX9-LABEL: name: fcanonicalize_fneg_fabs_f32_denorm
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX9: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %3:vgpr_32 = nofpexcept V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FABS %1
@@ -376,14 +376,14 @@ body: |
     ; GFX8: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX8: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX8: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX8: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
-    ; GFX8: S_ENDPGM 0, implicit [[V_MUL_F32_e64_]]
+    ; GFX8: %3:vgpr_32 = nofpexcept V_MUL_F32_e64 0, 1065353216, 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
+    ; GFX8: S_ENDPGM 0, implicit %3
     ; GFX9-LABEL: name: fcanonicalize_fneg_fabs_f32_flush
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2147483648
     ; GFX9: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GFX9: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_MAX_F32_e64_]]
+    ; GFX9: %3:vgpr_32 = nofpexcept V_MAX_F32_e64 2, [[V_XOR_B32_e32_]], 2, [[V_XOR_B32_e32_]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FABS %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
index fdf6dcfb8d10..b548fcbc5da5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: fceil_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_CEIL_F32_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_CEIL_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FCEIL %0
     $vgpr0 = COPY %1
@@ -34,8 +34,8 @@ body: |
     ; CHECK-LABEL: name: fceil_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_CEIL_F32_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_CEIL_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_CEIL_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FCEIL %0
     $vgpr0 = COPY %1
@@ -54,8 +54,8 @@ body: |
     ; CHECK-LABEL: name: fceil_s64_sv
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0_vgpr1 = COPY [[V_CEIL_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0_vgpr1 = COPY %1
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_FCEIL %0
     $vgpr0_vgpr1 = COPY %1
@@ -74,8 +74,8 @@ body: |
     ; CHECK-LABEL: name: fceil_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_CEIL_F64_e64_:%[0-9]+]]:vreg_64 = V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0_vgpr1 = COPY [[V_CEIL_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_CEIL_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0_vgpr1 = COPY %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FCEIL %0
     $vgpr0_vgpr1 = COPY %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
index 75a78190e62e..01836bc4c29d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fceil.s16.mir
@@ -38,8 +38,8 @@ body: |
     ; GCN-LABEL: name: fceil_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CEIL_F16_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FCEIL %1
@@ -60,8 +60,8 @@ body: |
     ; GCN-LABEL: name: fceil_s16_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CEIL_F16_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_CEIL_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FCEIL %1
@@ -82,8 +82,8 @@ body: |
     ; GCN-LABEL: name: fceil_fneg_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CEIL_F16_e64_:%[0-9]+]]:vgpr_32 = V_CEIL_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CEIL_F16_e64_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_CEIL_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FNEG %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
index c052f484bff3..bd3df5f5a06c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.mir
@@ -37,14 +37,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_EQ_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_oeq_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(oeq), %0, %1
@@ -62,14 +62,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ogt_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GT_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ogt_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_GT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ogt), %0, %1
@@ -87,14 +87,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oge_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GE_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_oge_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_GE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(oge), %0, %1
@@ -112,14 +112,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_olt_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LT_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_olt_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_LT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(olt), %0, %1
@@ -137,14 +137,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ole_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LE_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ole_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_LE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ole), %0, %1
@@ -162,14 +162,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_one_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_one_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_LG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(one), %0, %1
@@ -187,14 +187,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ord_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_O_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ord_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_O_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_O_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_O_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ord), %0, %1
@@ -212,14 +212,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uno_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_U_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_uno_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_U_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_U_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(uno), %0, %1
@@ -237,14 +237,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ueq_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLG_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ueq_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLG_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NLG_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ueq), %0, %1
@@ -262,14 +262,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ugt_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLE_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ugt_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NLE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ugt), %0, %1
@@ -287,14 +287,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uge_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLT_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_uge_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NLT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(uge), %0, %1
@@ -312,14 +312,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ult_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGE_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ult_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGE_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NGE_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ult), %0, %1
@@ -337,14 +337,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ule_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGT_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ule_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGT_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NGT_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(ule), %0, %1
@@ -362,14 +362,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_une_s32_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NEQ_F32_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_une_s32_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NEQ_F32_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F32_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NEQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vcc(s1) = G_FCMP floatpred(une), %0, %1
@@ -435,14 +435,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_EQ_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_oeq_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_EQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_EQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(oeq), %0, %1
@@ -460,14 +460,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ogt_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GT_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ogt_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_GT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_GT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ogt), %0, %1
@@ -485,14 +485,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oge_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GE_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_oge_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_GE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_GE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(oge), %0, %1
@@ -510,14 +510,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_olt_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LT_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_olt_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_LT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(olt), %0, %1
@@ -535,14 +535,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ole_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LE_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ole_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_LE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ole), %0, %1
@@ -560,14 +560,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_one_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_one_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_LG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_LG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(one), %0, %1
@@ -585,14 +585,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ord_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_O_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ord_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_O_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_O_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_O_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ord), %0, %1
@@ -610,14 +610,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uno_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_U_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_uno_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_U_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_U_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(uno), %0, %1
@@ -635,14 +635,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ueq_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLG_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ueq_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLG_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NLG_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ueq), %0, %1
@@ -660,14 +660,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ugt_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLE_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ugt_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NLE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ugt), %0, %1
@@ -685,14 +685,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uge_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLT_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_uge_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NLT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NLT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(uge), %0, %1
@@ -710,14 +710,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ult_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGE_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ult_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NGE_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NGE_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ult), %0, %1
@@ -735,14 +735,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ule_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGT_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_ule_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NGT_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NGT_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(ule), %0, %1
@@ -760,14 +760,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_une_s64_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE64: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE64: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NEQ_F64_e64_]]
+    ; WAVE64: %2:sreg_64 = nofpexcept V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %2
     ; WAVE32-LABEL: name: fcmp_une_s64_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; WAVE32: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; WAVE32: [[V_CMP_NEQ_F64_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F64_e64_]]
+    ; WAVE32: %2:sreg_32 = nofpexcept V_CMP_NEQ_F64_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = COPY $vgpr2_vgpr3
     %2:vcc(s1) = G_FCMP floatpred(une), %0, %1
@@ -809,15 +809,15 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s32_vv_select_user
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY1]], 0, [[COPY]], [[V_CMP_EQ_F32_e64_]], implicit $exec
+    ; WAVE64: %2:sreg_64_xexec = nofpexcept V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY1]], 0, [[COPY]], %2, implicit $exec
     ; WAVE64: S_ENDPGM 0, implicit [[V_CNDMASK_B32_e64_]]
     ; WAVE32-LABEL: name: fcmp_oeq_s32_vv_select_user
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY1]], 0, [[COPY]], [[V_CMP_EQ_F32_e64_]], implicit $exec
+    ; WAVE32: %2:sreg_32_xm0_xexec = nofpexcept V_CMP_EQ_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, [[COPY1]], 0, [[COPY]], %2, implicit $exec
     ; WAVE32: S_ENDPGM 0, implicit [[V_CNDMASK_B32_e64_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
index a0354d840393..7d615a671436 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fcmp.s16.mir
@@ -43,14 +43,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oeq_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_EQ_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_oeq_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_EQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_EQ_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_EQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -70,14 +70,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ogt_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GT_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ogt_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GT_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_GT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -97,14 +97,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_oge_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_GE_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_oge_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_GE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_GE_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_GE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -124,14 +124,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_olt_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LT_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_olt_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LT_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_LT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -151,14 +151,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ole_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LE_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ole_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LE_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_LE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -177,14 +177,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_one_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_one_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -204,14 +204,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ord_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ord_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_LG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_LG_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_LG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -231,14 +231,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uno_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_U_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_uno_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_U_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_U_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_U_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -258,14 +258,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ueq_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLG_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ueq_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLG_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLG_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_NLG_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -285,14 +285,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ugt_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLE_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ugt_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLE_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_NLE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -312,14 +312,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_uge_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NLT_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_uge_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NLT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NLT_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_NLT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -339,14 +339,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ult_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGE_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ult_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGE_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGE_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_NGE_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -366,14 +366,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_ule_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NGT_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_ule_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NGT_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NGT_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_NGT_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -393,14 +393,14 @@ body: |
     ; WAVE64-LABEL: name: fcmp_une_s16_vv
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE64: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_64 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE64: S_ENDPGM 0, implicit [[V_CMP_NEQ_F16_e64_]]
+    ; WAVE64: %4:sreg_64 = nofpexcept V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE64: S_ENDPGM 0, implicit %4
     ; WAVE32-LABEL: name: fcmp_une_s16_vv
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; WAVE32: [[V_CMP_NEQ_F16_e64_:%[0-9]+]]:sreg_32 = V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
-    ; WAVE32: S_ENDPGM 0, implicit [[V_CMP_NEQ_F16_e64_]]
+    ; WAVE32: %4:sreg_32 = nofpexcept V_CMP_NEQ_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
+    ; WAVE32: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
index a0339fa9551e..44f670d3ae6b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fexp2.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: fexp2_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_EXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_EXP_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FEXP2 %0
     S_ENDPGM 0, implicit %1
@@ -34,8 +34,8 @@ body: |
     ; CHECK-LABEL: name: fexp2_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_EXP_F32_e64_:%[0-9]+]]:vgpr_32 = V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_EXP_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_EXP_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FEXP2 %0
     S_ENDPGM 0, implicit %1
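
The updated CHECK lines in these hunks switch from named FileCheck captures to raw virtual-register numbers and now expect the selected VALU instructions to carry the nofpexcept flag. As a rough illustration only, and not the code from this commit, the inference amounts to something like the sketch below, assuming the standard MachineInstr flag API (mayRaiseFPException, setFlag, MachineInstr::NoFPExcept) and a hypothetical helper name inferNoFPExcept:

  // Hypothetical, illustrative sketch -- not the actual upstream change.
  // After a non-strict generic FP operation has been selected, tag the
  // resulting target instruction as nofpexcept when its description says
  // it may raise FP exceptions.
  #include "llvm/CodeGen/MachineInstr.h"

  static void inferNoFPExcept(llvm::MachineInstr &SelectedMI,
                              bool SrcWasStrictFPOp) {
    if (!SrcWasStrictFPOp && SelectedMI.mayRaiseFPException())
      SelectedMI.setFlag(llvm::MachineInstr::NoFPExcept);
  }
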

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
index 68bde4c25b64..5809a4f7eee1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s16.mir
@@ -38,8 +38,8 @@ body: |
     ; VI-LABEL: name: ffloor_s16_vv
     ; VI: liveins: $vgpr0
     ; VI: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
+    ; VI: %2:vgpr_32 = nofpexcept V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; VI: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FFLOOR %1
@@ -60,8 +60,8 @@ body: |
     ; VI-LABEL: name: ffloor_s16_vs
     ; VI: liveins: $sgpr0
     ; VI: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
+    ; VI: %2:vgpr_32 = nofpexcept V_FLOOR_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; VI: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FFLOOR %1
@@ -90,8 +90,8 @@ body: |
     ; VI-LABEL: name: ffloor_fneg_s16_vv
     ; VI: liveins: $vgpr0
     ; VI: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; VI: [[V_FLOOR_F16_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; VI: $vgpr0 = COPY [[V_FLOOR_F16_e64_]]
+    ; VI: %3:vgpr_32 = nofpexcept V_FLOOR_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; VI: $vgpr0 = COPY %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FNEG %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
index 710a7927acd2..9da8fcb881f8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s32.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: ffloor_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FFLOOR %0
     $vgpr0 = COPY %1
@@ -34,8 +34,8 @@ body: |
     ; CHECK-LABEL: name: ffloor_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_FLOOR_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FFLOOR %0
     $vgpr0 = COPY %1
@@ -54,8 +54,8 @@ body: |
     ; CHECK-LABEL: name: ffloor_fneg_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FFLOOR %1
@@ -74,8 +74,8 @@ body: |
     ; CHECK-LABEL: name: ffloor_fneg_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_FLOOR_F32_e64_:%[0-9]+]]:vgpr_32 = V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_FLOOR_F32_e64_]]
+    ; CHECK: %2:vgpr_32 = nofpexcept V_FLOOR_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FFLOOR %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
index 276a1ffb9930..97310017a23d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-ffloor.s64.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: ffloor_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = V_FLOOR_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0_vgpr1 = COPY [[V_FLOOR_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_FLOOR_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0_vgpr1 = COPY %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FFLOOR %0
     $vgpr0_vgpr1 = COPY %1
@@ -50,8 +50,8 @@ body: |
     ; CHECK-LABEL: name: ffloor_fneg_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_FLOOR_F64_e64_:%[0-9]+]]:vreg_64 = V_FLOOR_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0_vgpr1 = COPY [[V_FLOOR_F64_e64_]]
+    ; CHECK: %2:vreg_64 = nofpexcept V_FLOOR_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0_vgpr1 = COPY %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FNEG %0
     %2:vgpr(s64) = G_FFLOOR %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
index 2034eb73fdf0..0a02c23c3229 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fma.s32.mir
@@ -17,21 +17,21 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX6: %3:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %3
     ; GFX9-DL-LABEL: name: fma_f32
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
+    ; GFX9-DL: %3:vgpr_32 = nofpexcept V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9-DL: S_ENDPGM 0, implicit %3
     ; GFX10-LABEL: name: fma_f32
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
+    ; GFX10: %3:vgpr_32 = nofpexcept V_FMAC_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2
@@ -54,21 +54,21 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_FMA_F32 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     ; GFX9-DL-LABEL: name: fma_f32_fneg_src0
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
+    ; GFX9-DL: %4:vgpr_32 = nofpexcept V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9-DL: S_ENDPGM 0, implicit %4
     ; GFX10-LABEL: name: fma_f32_fneg_src0
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
+    ; GFX10: %4:vgpr_32 = nofpexcept V_FMAC_F32_e64 1, [[COPY]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2
@@ -92,21 +92,21 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     ; GFX9-DL-LABEL: name: fma_f32_fneg_src1
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
+    ; GFX9-DL: %4:vgpr_32 = nofpexcept V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9-DL: S_ENDPGM 0, implicit %4
     ; GFX10-LABEL: name: fma_f32_fneg_src1
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMAC_F32_e64_:%[0-9]+]]:vgpr_32 = V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_FMAC_F32_e64_]]
+    ; GFX10: %4:vgpr_32 = nofpexcept V_FMAC_F32_e64 0, [[COPY]], 1, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2
@@ -130,21 +130,21 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     ; GFX9-DL-LABEL: name: fma_f32_fneg_src2
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX9-DL: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9-DL: S_ENDPGM 0, implicit %4
     ; GFX10-LABEL: name: fma_f32_fneg_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX10: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2
@@ -168,21 +168,21 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX6: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %4
     ; GFX9-DL-LABEL: name: fma_f32_fabs_src2
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX9-DL: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9-DL: S_ENDPGM 0, implicit %4
     ; GFX10-LABEL: name: fma_f32_fabs_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX10: %4:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 2, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2
@@ -206,21 +206,21 @@ body: |
     ; GFX6: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX6: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX6: %5:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit %5
     ; GFX9-DL-LABEL: name: fma_f32_copy_fneg_src2
     ; GFX9-DL: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9-DL: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX9-DL: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX9-DL: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX9-DL: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX9-DL: %5:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX9-DL: S_ENDPGM 0, implicit %5
     ; GFX10-LABEL: name: fma_f32_copy_fneg_src2
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_FMA_F32_:%[0-9]+]]:vgpr_32 = V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_FMA_F32_]]
+    ; GFX10: %5:vgpr_32 = nofpexcept V_FMA_F32 0, [[COPY]], 0, [[COPY1]], 1, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
index 720f9285961a..7c7431584912 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.mir
@@ -21,16 +21,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
@@ -89,16 +89,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
index e94ab1c3cdc5..4320c0e41332 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.s16.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_ieee_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; CHECK: %4:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -36,8 +36,8 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_ieee_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; CHECK: %5:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
index bc2e53d421c2..015aab48e0fa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum-ieee.v2s16.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX9-LABEL: name: fmaxnum_ieee_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_FMAXNUM_IEEE %0, %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
index a440e801682f..075fab1a0726 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.mir
@@ -22,16 +22,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
@@ -88,16 +88,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MAX_F32_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MAX_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MAX_F64_:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_1:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MAX_F64_2:%[0-9]+]]:vreg_64 = V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MAX_F64_]], implicit [[V_MAX_F64_1]], implicit [[V_MAX_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MAX_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MAX_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
index 1bf0c576adfe..8523fba9852b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.s16.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; CHECK: %4:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -36,8 +36,8 @@ body: |
     ; CHECK-LABEL: name: fmaxnum_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MAX_F16_e64_:%[0-9]+]]:vgpr_32 = V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MAX_F16_e64_]]
+    ; CHECK: %5:vgpr_32 = nofpexcept V_MAX_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
index bc83f90c8a11..1b78f7abacb4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmaxnum.v2s16.mir
@@ -14,8 +14,8 @@ body: |
     ; GFX9-LABEL: name: fmaxnum_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MAX_F16_:%[0-9]+]]:vgpr_32 = V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MAX_F16_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_PK_MAX_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_FMAXNUM %0, %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
index 40b97460b203..58a41e9f3cf3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.mir
@@ -21,16 +21,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
@@ -89,16 +89,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
index cf00b1b1d80a..38216045c5d8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.s16.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: fminnum_ieee_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
+    ; CHECK: %4:vgpr_32 = nofpexcept V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -36,8 +36,8 @@ body: |
     ; CHECK-LABEL: name: fminnum_ieee_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
+    ; CHECK: %5:vgpr_32 = nofpexcept V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
index 0bb68ef86ed9..88a21a12a43b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum-ieee.v2s16.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX9-LABEL: name: fminnum_ieee_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MIN_F16_:%[0-9]+]]:vgpr_32 = V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MIN_F16_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_FMINNUM_IEEE %0, %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
index 74350b247fc4..0f8bb76c1b17 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.mir
@@ -22,16 +22,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
@@ -88,16 +88,16 @@ body: |
     ; GFX7: [[COPY4:%[0-9]+]]:sreg_64 = COPY $sgpr10_sgpr11
     ; GFX7: [[COPY5:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
     ; GFX7: [[COPY6:%[0-9]+]]:vreg_64 = COPY $vgpr12_vgpr13
-    ; GFX7: [[V_MIN_F32_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: FLAT_STORE_DWORD [[COPY3]], [[V_MIN_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GFX7: [[V_MIN_F64_:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_1:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: [[V_MIN_F64_2:%[0-9]+]]:vreg_64 = V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_MIN_F64_]], implicit [[V_MIN_F64_1]], implicit [[V_MIN_F64_2]]
+    ; GFX7: %7:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %8:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %9:vgpr_32 = nofpexcept V_MIN_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: FLAT_STORE_DWORD [[COPY3]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GFX7: %10:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY4]], 0, [[COPY5]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %11:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY4]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: %12:vreg_64 = nofpexcept V_MIN_F64 0, [[COPY5]], 0, [[COPY6]], 0, 0, implicit $mode, implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit %10, implicit %11, implicit %12
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
index 0a4f65544a46..bb8f6fe48579 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.s16.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: fminnum_f16_vv
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
+    ; CHECK: %4:vgpr_32 = nofpexcept V_MIN_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %4
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0
@@ -36,8 +36,8 @@ body: |
     ; CHECK-LABEL: name: fminnum_f16_v_fneg_v
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; CHECK: [[V_MIN_F16_e64_:%[0-9]+]]:vgpr_32 = V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: S_ENDPGM 0, implicit [[V_MIN_F16_e64_]]
+    ; CHECK: %5:vgpr_32 = nofpexcept V_MIN_F16_e64 0, [[COPY]], 1, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: S_ENDPGM 0, implicit %5
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s16) = G_TRUNC %0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
index 255d05d39f00..b212c3631653 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fminnum.v2s16.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX9-LABEL: name: fminnum_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MIN_F16_:%[0-9]+]]:vgpr_32 = V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MIN_F16_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_PK_MIN_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_FMINNUM %0, %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
index babbe653b980..18639b14a211 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.mir
@@ -15,12 +15,12 @@ body: |
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
     ; GCN: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
-    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: %4:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %5:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %6:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: FLAT_STORE_DWORD [[COPY3]], %4, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY3]], %5, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY3]], %6, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
@@ -53,10 +53,10 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
     ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GCN: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F64_1:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F64_2:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_MUL_F64_]], implicit [[V_MUL_F64_1]], implicit [[V_MUL_F64_2]]
+    ; GCN: %4:vreg_64 = nofpexcept V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %5:vreg_64 = nofpexcept V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %6:vreg_64 = nofpexcept V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %4, implicit %5, implicit %6
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = COPY $vgpr0_vgpr1
     %2:vgpr(s64) = COPY $vgpr2_vgpr3
@@ -86,10 +86,10 @@ body: |
     ; GCN-LABEL: name: fmul_f16
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F16_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: S_ENDPGM 0, implicit [[V_MUL_F16_e64_]], implicit [[V_MUL_F16_e64_1]], implicit [[V_MUL_F16_e64_2]]
+    ; GCN: %7:vgpr_32 = nofpexcept V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %8:vgpr_32 = nofpexcept V_MUL_F16_e64 0, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %9:vgpr_32 = nofpexcept V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: S_ENDPGM 0, implicit %7, implicit %8, implicit %9
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s32) = COPY $vgpr1
@@ -123,26 +123,26 @@ body: |
     ; GCN-LABEL: name: fmul_modifiers_f32
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
-    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_3:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_4:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_5:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_6:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_7:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_8:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_MUL_F32_e64_9:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_3]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_4]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_5]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_6]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_7]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_8]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_9]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: %6:vgpr_32 = nofpexcept V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %7:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %8:vgpr_32 = nofpexcept V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %9:vgpr_32 = nofpexcept V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %10:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %11:vgpr_32 = nofpexcept V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %12:vgpr_32 = nofpexcept V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %13:vgpr_32 = nofpexcept V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %14:vgpr_32 = nofpexcept V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %15:vgpr_32 = nofpexcept V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %6, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %7, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %8, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %9, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %10, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %11, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %12, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %13, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %14, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY1]], %15, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(p1) = COPY $vgpr2_vgpr3

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
index b70c8e25ccd1..b614106a6f3e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fmul.v2s16.mir
@@ -13,8 +13,8 @@ body: |
     ; GFX9-LABEL: name: fmul_v2f16_vv
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
+    ; GFX9: %2:vgpr_32 = nofpexcept V_PK_MUL_F16 8, [[COPY]], 8, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %2
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_FMUL %0, %1
@@ -33,8 +33,8 @@ body: |
     ; GFX9-LABEL: name: fmul_v2f16_fneg_v_fneg_v
     ; GFX9: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32 = V_PK_MUL_F16 11, [[COPY]], 11, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]]
+    ; GFX9: %4:vgpr_32 = nofpexcept V_PK_MUL_F16 11, [[COPY]], 11, [[COPY1]], 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %4
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(<2 x s16>) = COPY $vgpr1
     %2:vgpr(<2 x s16>) = G_FNEG %0
@@ -60,8 +60,8 @@ body: |
     ; GFX9: [[FNEG:%[0-9]+]]:vgpr(s16) = G_FNEG [[TRUNC]]
     ; GFX9: [[ANYEXT:%[0-9]+]]:vgpr(s32) = G_ANYEXT [[FNEG]](s16)
     ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:vgpr_32(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[COPY2]](s32)
-    ; GFX9: [[V_PK_MUL_F16_:%[0-9]+]]:vgpr_32(<2 x s16>) = V_PK_MUL_F16 8, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 8, [[COPY]](<2 x s16>), 0, 0, 0, 0, 0, implicit $mode, implicit $exec
-    ; GFX9: S_ENDPGM 0, implicit [[V_PK_MUL_F16_]](<2 x s16>)
+    ; GFX9: %7:vgpr_32(<2 x s16>) = nofpexcept V_PK_MUL_F16 8, [[BUILD_VECTOR_TRUNC]](<2 x s16>), 8, [[COPY]](<2 x s16>), 0, 0, 0, 0, 0, implicit $mode, implicit $exec
+    ; GFX9: S_ENDPGM 0, implicit %7(<2 x s16>)
     %0:vgpr(<2 x s16>) = COPY $vgpr0
     %1:vgpr(s32) = COPY $vgpr1
     %2:vgpr(s32) = COPY $vgpr2

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
index 64662d748cd1..f8171351781a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptosi.mir
@@ -14,8 +14,8 @@ body: |
     ; GCN-LABEL: name: fptosi_s32_to_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
+    ; GCN: %1:vgpr_32 = nofpexcept V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FPTOSI %0
     $vgpr0 = COPY %1
@@ -34,8 +34,8 @@ body: |
     ; GCN-LABEL: name: fptosi_s32_to_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
+    ; GCN: %1:vgpr_32 = nofpexcept V_CVT_I32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FPTOSI %0
     $vgpr0 = COPY %1
@@ -54,8 +54,8 @@ body: |
     ; GCN-LABEL: name: fptosi_s32_to_s32_fneg_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_I32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_I32_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FPTOSI %1
@@ -75,9 +75,9 @@ body: |
     ; GCN-LABEL: name: fptosi_s16_to_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 %3, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s32) = G_FPTOSI %1
@@ -97,9 +97,9 @@ body: |
     ; GCN-LABEL: name: fptosi_s16_to_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 %3, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s32) = G_FPTOSI %1
@@ -121,9 +121,9 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
     ; GCN: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_I32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_I32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_I32_F32_e32_]]
+    ; GCN: %4:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_I32_F32_e32 %4, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FNEG %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
index a13620ad9452..e6dd3fbb04a4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fptoui.mir
@@ -15,10 +15,10 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
-    ; GCN: [[V_CVT_U32_F32_e64_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e64_1:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_U32_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
-    ; GCN: FLAT_STORE_DWORD [[COPY2]], [[V_CVT_U32_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_U32_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: %4:vgpr_32 = nofpexcept V_CVT_U32_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: FLAT_STORE_DWORD [[COPY2]], %3, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
+    ; GCN: FLAT_STORE_DWORD [[COPY2]], %4, 0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4, addrspace 1)
     %0:sgpr(s32) = COPY $sgpr0
 
     %1:vgpr(s32) = COPY $vgpr0
@@ -48,9 +48,9 @@ body: |
     ; GCN-LABEL: name: fptoui_s16_to_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %3, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s32) = G_FPTOUI %1
@@ -70,9 +70,9 @@ body: |
     ; GCN-LABEL: name: fptoui_s16_to_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[COPY]], implicit $mode, implicit $exec
+    ; GCN: %2:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %3, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s32) = G_FPTOUI %1
@@ -94,9 +94,9 @@ body: |
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 32768
     ; GCN: [[V_XOR_B32_e32_:%[0-9]+]]:vgpr_32 = V_XOR_B32_e32 [[S_MOV_B32_]], [[COPY]], implicit $exec
-    ; GCN: [[V_CVT_F32_F16_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
-    ; GCN: [[V_CVT_U32_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_U32_F32_e32 [[V_CVT_F32_F16_e32_]], implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_CVT_U32_F32_e32_]]
+    ; GCN: %4:vgpr_32 = nofpexcept V_CVT_F32_F16_e32 [[V_XOR_B32_e32_]], implicit $mode, implicit $exec
+    ; GCN: %3:vgpr_32 = nofpexcept V_CVT_U32_F32_e32 %4, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FNEG %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
index 316046edaad4..7e918a6700a1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.mir
@@ -15,8 +15,8 @@ body: |
     ; GCN-LABEL: name: frint_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
+    ; GCN: %1:vgpr_32 = nofpexcept V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FRINT %0
     $vgpr0 = COPY %1
@@ -35,8 +35,8 @@ body: |
     ; GCN-LABEL: name: frint_s32_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
+    ; GCN: %1:vgpr_32 = nofpexcept V_RNDNE_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_FRINT %0
     $vgpr0 = COPY %1
@@ -55,8 +55,8 @@ body: |
     ; GCN-LABEL: name: frint_fneg_s32_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F32_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_RNDNE_F32_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_RNDNE_F32_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_FNEG %0
     %2:vgpr(s32) = G_FRINT %1
@@ -76,8 +76,8 @@ body: |
     ; GCN-LABEL: name: frint_s64_vv
     ; GCN: liveins: $vgpr0_vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0_vgpr1 = COPY [[V_RNDNE_F64_e64_]]
+    ; GCN: %1:vreg_64 = nofpexcept V_RNDNE_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0_vgpr1 = COPY %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FRINT %0
     $vgpr0_vgpr1 = COPY %1
@@ -96,8 +96,8 @@ body: |
     ; GCN-LABEL: name: frint_s64_fneg_vv
     ; GCN: liveins: $vgpr0_vgpr1
     ; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; GCN: [[V_RNDNE_F64_e64_:%[0-9]+]]:vreg_64 = V_RNDNE_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0_vgpr1 = COPY [[V_RNDNE_F64_e64_]]
+    ; GCN: %2:vreg_64 = nofpexcept V_RNDNE_F64_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0_vgpr1 = COPY %2
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_FNEG %0
     %2:vgpr(s64) = G_FRINT %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
index e449a7b93baa..b4af0caa8170 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-frint.s16.mir
@@ -38,8 +38,8 @@ body: |
     ; GCN-LABEL: name: frint_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FRINT %1
@@ -60,8 +60,8 @@ body: |
     ; GCN-LABEL: name: frint_s16_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_RNDNE_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FRINT %1
@@ -82,8 +82,8 @@ body: |
     ; GCN-LABEL: name: frint_fneg_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_RNDNE_F16_e64_:%[0-9]+]]:vgpr_32 = V_RNDNE_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_RNDNE_F16_e64_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_RNDNE_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FNEG %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
index a9cd8c51f62a..4551dab4818e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.mir
@@ -14,8 +14,8 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s32_vv
     ; CHECK: liveins: $vgpr0
     ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_TRUNC_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s32) = G_INTRINSIC_TRUNC %0
     $vgpr0 = COPY %1
@@ -34,8 +34,8 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s32_vs
     ; CHECK: liveins: $sgpr0
     ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; CHECK: [[V_TRUNC_F32_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0 = COPY [[V_TRUNC_F32_e64_]]
+    ; CHECK: %1:vgpr_32 = nofpexcept V_TRUNC_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s32) = G_INTRINSIC_TRUNC %0
     $vgpr0 = COPY %1
@@ -54,8 +54,8 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s64_sv
     ; CHECK: liveins: $sgpr0_sgpr1
     ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
-    ; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0_vgpr1 = COPY %1
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s64) = G_INTRINSIC_TRUNC %0
     $vgpr0_vgpr1 = COPY %1
@@ -74,8 +74,8 @@ body: |
     ; CHECK-LABEL: name: intrinsic_trunc_s64_vv
     ; CHECK: liveins: $vgpr0_vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
-    ; CHECK: [[V_TRUNC_F64_e64_:%[0-9]+]]:vreg_64 = V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; CHECK: $vgpr0_vgpr1 = COPY [[V_TRUNC_F64_e64_]]
+    ; CHECK: %1:vreg_64 = nofpexcept V_TRUNC_F64_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; CHECK: $vgpr0_vgpr1 = COPY %1
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s64) = G_INTRINSIC_TRUNC %0
     $vgpr0_vgpr1 = COPY %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
index d2fb035c8b6b..ec8c774198e0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-intrinsic-trunc.s16.mir
@@ -14,8 +14,8 @@ body: |
     ; GCN-LABEL: name: intrinsic_trunc_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC_TRUNC %1
@@ -36,8 +36,8 @@ body: |
     ; GCN-LABEL: name: intrinsic_trunc_s16_vs
     ; GCN: liveins: $sgpr0
     ; GCN: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
-    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
+    ; GCN: %2:vgpr_32 = nofpexcept V_TRUNC_F16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %2
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_INTRINSIC_TRUNC %1
@@ -58,8 +58,8 @@ body: |
     ; GCN-LABEL: name: intrinsic_trunc_fneg_s16_vv
     ; GCN: liveins: $vgpr0
     ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_TRUNC_F16_e64_:%[0-9]+]]:vgpr_32 = V_TRUNC_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_TRUNC_F16_e64_]]
+    ; GCN: %3:vgpr_32 = nofpexcept V_TRUNC_F16_e64 1, [[COPY]], 0, 0, implicit $mode, implicit $exec
+    ; GCN: $vgpr0 = COPY %3
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_TRUNC %0
     %2:vgpr(s16) = G_FNEG %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
index 3cd2362b1093..6d6a70e5f578 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-sitofp.mir
@@ -59,15 +59,15 @@ body: |
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
-    ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
+    ; WAVE64: $vgpr0 = COPY %1
     ; WAVE32-LABEL: name: sitofp_s32_to_s16_vv
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
-    ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
+    ; WAVE32: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_SITOFP %0
     %2:vgpr(s32) = G_ANYEXT %1
@@ -88,15 +88,15 @@ body: |
     ; WAVE64: liveins: $sgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
-    ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
+    ; WAVE64: $vgpr0 = COPY %1
     ; WAVE32-LABEL: name: sitofp_s32_to_s16_vs
     ; WAVE32: liveins: $sgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[V_CVT_F32_I32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_I32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
-    ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_I32_e32_]], implicit $mode, implicit $exec
+    ; WAVE32: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s16) = G_SITOFP %0
     %2:vgpr(s32) = G_ANYEXT %1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
index 421b987f8f92..0714fe3f6f1f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-uitofp.mir
@@ -68,15 +68,15 @@ body: |
     ; WAVE64: liveins: $vgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
-    ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
+    ; WAVE64: $vgpr0 = COPY %1
     ; WAVE32-LABEL: name: uitofp_s32_to_s16_vv
     ; WAVE32: liveins: $vgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
     ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
-    ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
+    ; WAVE32: $vgpr0 = COPY %1
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s16) = G_UITOFP %0
     %2:vgpr(s32) = G_ANYEXT %1
@@ -97,15 +97,15 @@ body: |
     ; WAVE64: liveins: $sgpr0
     ; WAVE64: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE64: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE64: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
-    ; WAVE64: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE64: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
+    ; WAVE64: $vgpr0 = COPY %1
     ; WAVE32-LABEL: name: uitofp_s32_to_s16_vs
     ; WAVE32: liveins: $sgpr0
     ; WAVE32: $vcc_hi = IMPLICIT_DEF
     ; WAVE32: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
     ; WAVE32: [[V_CVT_F32_U32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F32_U32_e32 [[COPY]], implicit $mode, implicit $exec
-    ; WAVE32: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
-    ; WAVE32: $vgpr0 = COPY [[V_CVT_F16_F32_e32_]]
+    ; WAVE32: %1:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[V_CVT_F32_U32_e32_]], implicit $mode, implicit $exec
+    ; WAVE32: $vgpr0 = COPY %1
     %0:sgpr(s32) = COPY $sgpr0
     %1:vgpr(s16) = G_UITOFP %0
     %2:vgpr(s32) = G_ANYEXT %1

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-add.mir b/llvm/test/CodeGen/X86/GlobalISel/select-add.mir
index d6e616109dfc..e2998d235487 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-add.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-add.mir
@@ -175,16 +175,16 @@ registers:
   - { id: 2, class: vecr }
 # SSE:           %0:vr128 = COPY $xmm0
 # SSE-NEXT:      %1:vr128 = COPY $xmm1
-# SSE-NEXT:      %2:vr128 = ADDPSrr %0, %1
+# SSE-NEXT:      %2:vr128 = nofpexcept ADDPSrr %0, %1
 # AVX:           %0:vr128 = COPY $xmm0
 # AVX-NEXT:      %1:vr128 = COPY $xmm1
-# AVX-NEXT:      %2:vr128 = VADDPSrr %0, %1
+# AVX-NEXT:      %2:vr128 = nofpexcept VADDPSrr %0, %1
 # AVX512F:       %0:vr128 = COPY $xmm0
 # AVX512F-NEXT:  1:vr128 = COPY $xmm1
-# AVX512F-NEXT:  %2:vr128 = VADDPSrr %0, %1
+# AVX512F-NEXT:  %2:vr128 = nofpexcept VADDPSrr %0, %1
 # AVX512VL:      %0:vr128x = COPY $xmm0
 # AVX512VL-NEXT: %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT: %2:vr128x = VADDPSZ128rr %0, %1
+# AVX512VL-NEXT: %2:vr128x = nofpexcept VADDPSZ128rr %0, %1
 body:             |
   bb.1 (%ir-block.0):
     liveins: $xmm0, $xmm1

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
index a34f622db1b7..2c4b560f2a29 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
@@ -44,8 +44,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSSrr]]
+    ; SSE: %4:fr32 = nofpexcept ADDSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fadd_float
@@ -53,8 +53,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSSrr]]
+    ; AVX: %4:fr32 = nofpexcept VADDSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fadd_float
@@ -62,8 +62,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
+    ; AVX512F: %4:fr32x = nofpexcept VADDSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fadd_float
@@ -71,8 +71,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSSZrr]]
+    ; AVX512VL: %4:fr32x = nofpexcept VADDSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0
@@ -113,8 +113,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[ADDSDrr]]
+    ; SSE: %4:fr64 = nofpexcept ADDSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fadd_double
@@ -122,8 +122,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VADDSDrr]]
+    ; AVX: %4:fr64 = nofpexcept VADDSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fadd_double
@@ -131,8 +131,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
+    ; AVX512F: %4:fr64x = nofpexcept VADDSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fadd_double
@@ -140,8 +140,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VADDSDZrr]]
+    ; AVX512VL: %4:fr64x = nofpexcept VADDSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
index 8741968a5da6..e4c929ed28fa 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
@@ -44,8 +44,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSSrr]]
+    ; SSE: %4:fr32 = nofpexcept DIVSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fdiv_float
@@ -53,8 +53,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSSrr]]
+    ; AVX: %4:fr32 = nofpexcept VDIVSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fdiv_float
@@ -62,8 +62,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
+    ; AVX512F: %4:fr32x = nofpexcept VDIVSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fdiv_float
@@ -71,8 +71,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSSZrr]]
+    ; AVX512VL: %4:fr32x = nofpexcept VDIVSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0
@@ -113,8 +113,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[DIVSDrr]]
+    ; SSE: %4:fr64 = nofpexcept DIVSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fdiv_double
@@ -122,8 +122,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VDIVSDrr]]
+    ; AVX: %4:fr64 = nofpexcept VDIVSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fdiv_double
@@ -131,8 +131,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
+    ; AVX512F: %4:fr64x = nofpexcept VDIVSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fdiv_double
@@ -140,8 +140,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VDIVSDZrr]]
+    ; AVX512VL: %4:fr64x = nofpexcept VDIVSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
index 826e70dea2d5..6cd3a150d9c5 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
@@ -44,8 +44,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[MULSSrr]]
+    ; SSE: %4:fr32 = nofpexcept MULSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fmul_float
@@ -53,8 +53,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VMULSSrr]]
+    ; AVX: %4:fr32 = nofpexcept VMULSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fmul_float
@@ -62,8 +62,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSSZrr]]
+    ; AVX512F: %4:fr32x = nofpexcept VMULSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fmul_float
@@ -71,8 +71,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSSZrr]]
+    ; AVX512VL: %4:fr32x = nofpexcept VMULSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0
@@ -113,8 +113,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[MULSDrr]]
+    ; SSE: %4:fr64 = nofpexcept MULSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fmul_double
@@ -122,8 +122,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VMULSDrr]]
+    ; AVX: %4:fr64 = nofpexcept VMULSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fmul_double
@@ -131,8 +131,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSDZrr]]
+    ; AVX512F: %4:fr64x = nofpexcept VMULSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fmul_double
@@ -140,8 +140,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VMULSDZrr]]
+    ; AVX512VL: %4:fr64x = nofpexcept VMULSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
index 97edc58e69d0..05ade0e2c6c9 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
@@ -30,8 +30,8 @@ body:             |
     ; ALL-LABEL: name: test
     ; ALL: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; ALL: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
-    ; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY1]]
-    ; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY [[CVTSS2SDrr]]
+    ; ALL: %2:fr64 = nofpexcept CVTSS2SDrr [[COPY1]], implicit $mxcsr
+    ; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY %2
     ; ALL: $xmm0 = COPY [[COPY2]]
     ; ALL: RET 0, implicit $xmm0
     %1:vecr(s128) = COPY $xmm0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fptrunc-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fptrunc-scalar.mir
index bb6c1e9930d9..9ad08072ae53 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fptrunc-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fptrunc-scalar.mir
@@ -27,8 +27,8 @@ body:             |
     ; ALL: liveins: $xmm0
     ; ALL: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; ALL: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
-    ; ALL: [[CVTSD2SSrr:%[0-9]+]]:fr32 = CVTSD2SSrr [[COPY1]]
-    ; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY [[CVTSD2SSrr]]
+    ; ALL: %2:fr32 = nofpexcept CVTSD2SSrr [[COPY1]], implicit $mxcsr
+    ; ALL: [[COPY2:%[0-9]+]]:vr128 = COPY %2
     ; ALL: $xmm0 = COPY [[COPY2]]
     ; ALL: RET 0, implicit $xmm0
     %1:vecr(s128) = COPY $xmm0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir b/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
index efc6cd571158..95881d249e0d 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
@@ -44,8 +44,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSSrr]]
+    ; SSE: %4:fr32 = nofpexcept SUBSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fsub_float
@@ -53,8 +53,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
-    ; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSSrr]]
+    ; AVX: %4:fr32 = nofpexcept VSUBSSrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fsub_float
@@ -62,8 +62,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
+    ; AVX512F: %4:fr32x = nofpexcept VSUBSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fsub_float
@@ -71,8 +71,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr32x = COPY [[COPY2]]
-    ; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSSZrr]]
+    ; AVX512VL: %4:fr32x = nofpexcept VSUBSSZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0
@@ -113,8 +113,8 @@ body:             |
     ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; SSE: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY1]], [[COPY3]]
-    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY [[SUBSDrr]]
+    ; SSE: %4:fr64 = nofpexcept SUBSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; SSE: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; SSE: $xmm0 = COPY [[COPY4]]
     ; SSE: RET 0, implicit $xmm0
     ; AVX-LABEL: name: test_fsub_double
@@ -122,8 +122,8 @@ body:             |
     ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
     ; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
     ; AVX: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
-    ; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY1]], [[COPY3]]
-    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY [[VSUBSDrr]]
+    ; AVX: %4:fr64 = nofpexcept VSUBSDrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX: [[COPY4:%[0-9]+]]:vr128 = COPY %4
     ; AVX: $xmm0 = COPY [[COPY4]]
     ; AVX: RET 0, implicit $xmm0
     ; AVX512F-LABEL: name: test_fsub_double
@@ -131,8 +131,8 @@ body:             |
     ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512F: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
+    ; AVX512F: %4:fr64x = nofpexcept VSUBSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512F: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512F: $xmm0 = COPY [[COPY4]]
     ; AVX512F: RET 0, implicit $xmm0
     ; AVX512VL-LABEL: name: test_fsub_double
@@ -140,8 +140,8 @@ body:             |
     ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
     ; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY $xmm1
     ; AVX512VL: [[COPY3:%[0-9]+]]:fr64x = COPY [[COPY2]]
-    ; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY1]], [[COPY3]]
-    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY [[VSUBSDZrr]]
+    ; AVX512VL: %4:fr64x = nofpexcept VSUBSDZrr [[COPY1]], [[COPY3]], implicit $mxcsr
+    ; AVX512VL: [[COPY4:%[0-9]+]]:vr128x = COPY %4
     ; AVX512VL: $xmm0 = COPY [[COPY4]]
     ; AVX512VL: RET 0, implicit $xmm0
     %2:vecr(s128) = COPY $xmm0

diff --git a/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir b/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
index 1f46b02082ae..e0d6108f0387 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/select-sub.mir
@@ -111,13 +111,13 @@ registers:
   - { id: 2, class: vecr }
 # NO_AVX512VL:   %0:vr128 = COPY $xmm0
 # NO_AVX512VL:   %1:vr128 = COPY $xmm1
-# SSE-NEXT:      %2:vr128 = SUBPSrr %0, %1
-# AVX-NEXT:      %2:vr128 = VSUBPSrr %0, %1
-# AVX512F-NEXT:  %2:vr128 = VSUBPSrr %0, %1
+# SSE-NEXT:      %2:vr128 = nofpexcept SUBPSrr %0, %1
+# AVX-NEXT:      %2:vr128 = nofpexcept VSUBPSrr %0, %1
+# AVX512F-NEXT:  %2:vr128 = nofpexcept VSUBPSrr %0, %1
 #
 # AVX512VL:      %0:vr128x = COPY $xmm0
 # AVX512VL:      %1:vr128x = COPY $xmm1
-# AVX512VL-NEXT: %2:vr128x = VSUBPSZ128rr %0, %1
+# AVX512VL-NEXT: %2:vr128x = nofpexcept VSUBPSZ128rr %0, %1
 body:             |
   bb.1 (%ir-block.0):
     liveins: $xmm0, $xmm1

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-fptosi.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-fptosi.mir
index 6ff271d487e5..e957859161ee 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-fptosi.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-fptosi.mir
@@ -71,8 +71,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
-    ; CHECK: [[CVTTSS2SIrr:%[0-9]+]]:gr32 = CVTTSS2SIrr [[COPY1]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[CVTTSS2SIrr]].sub_8bit
+    ; CHECK: %3:gr32 = nofpexcept CVTTSS2SIrr [[COPY1]], implicit $mxcsr
+    ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY %3.sub_8bit
     ; CHECK: $al = COPY [[COPY2]]
     ; CHECK: RET 0, implicit $al
     %1:vecr(s128) = COPY $xmm0
@@ -102,8 +102,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
-    ; CHECK: [[CVTTSS2SIrr:%[0-9]+]]:gr32 = CVTTSS2SIrr [[COPY1]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY [[CVTTSS2SIrr]].sub_16bit
+    ; CHECK: %3:gr32 = nofpexcept CVTTSS2SIrr [[COPY1]], implicit $mxcsr
+    ; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY %3.sub_16bit
     ; CHECK: $ax = COPY [[COPY2]]
     ; CHECK: RET 0, implicit $ax
     %1:vecr(s128) = COPY $xmm0
@@ -132,8 +132,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
-    ; CHECK: [[CVTTSS2SIrr:%[0-9]+]]:gr32 = CVTTSS2SIrr [[COPY1]]
-    ; CHECK: $eax = COPY [[CVTTSS2SIrr]]
+    ; CHECK: %2:gr32 = nofpexcept CVTTSS2SIrr [[COPY1]], implicit $mxcsr
+    ; CHECK: $eax = COPY %2
     ; CHECK: RET 0, implicit $eax
     %1:vecr(s128) = COPY $xmm0
     %0:vecr(s32) = G_TRUNC %1(s128)
@@ -160,8 +160,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
-    ; CHECK: [[CVTTSS2SI64rr:%[0-9]+]]:gr64 = CVTTSS2SI64rr [[COPY1]]
-    ; CHECK: $rax = COPY [[CVTTSS2SI64rr]]
+    ; CHECK: %2:gr64 = nofpexcept CVTTSS2SI64rr [[COPY1]], implicit $mxcsr
+    ; CHECK: $rax = COPY %2
     ; CHECK: RET 0, implicit $rax
     %1:vecr(s128) = COPY $xmm0
     %0:vecr(s32) = G_TRUNC %1(s128)
@@ -189,8 +189,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
-    ; CHECK: [[CVTTSD2SIrr:%[0-9]+]]:gr32 = CVTTSD2SIrr [[COPY1]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY [[CVTTSD2SIrr]].sub_8bit
+    ; CHECK: %3:gr32 = nofpexcept CVTTSD2SIrr [[COPY1]], implicit $mxcsr
+    ; CHECK: [[COPY2:%[0-9]+]]:gr8 = COPY %3.sub_8bit
     ; CHECK: $al = COPY [[COPY2]]
     ; CHECK: RET 0, implicit $al
     %1:vecr(s128) = COPY $xmm0
@@ -220,8 +220,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
-    ; CHECK: [[CVTTSD2SIrr:%[0-9]+]]:gr32 = CVTTSD2SIrr [[COPY1]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY [[CVTTSD2SIrr]].sub_16bit
+    ; CHECK: %3:gr32 = nofpexcept CVTTSD2SIrr [[COPY1]], implicit $mxcsr
+    ; CHECK: [[COPY2:%[0-9]+]]:gr16 = COPY %3.sub_16bit
     ; CHECK: $ax = COPY [[COPY2]]
     ; CHECK: RET 0, implicit $ax
     %1:vecr(s128) = COPY $xmm0
@@ -250,8 +250,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
-    ; CHECK: [[CVTTSD2SIrr:%[0-9]+]]:gr32 = CVTTSD2SIrr [[COPY1]]
-    ; CHECK: $eax = COPY [[CVTTSD2SIrr]]
+    ; CHECK: %2:gr32 = nofpexcept CVTTSD2SIrr [[COPY1]], implicit $mxcsr
+    ; CHECK: $eax = COPY %2
     ; CHECK: RET 0, implicit $eax
     %1:vecr(s128) = COPY $xmm0
     %0:vecr(s64) = G_TRUNC %1(s128)
@@ -278,8 +278,8 @@ body:             |
     ; CHECK: liveins: $xmm0
     ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
     ; CHECK: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
-    ; CHECK: [[CVTTSD2SI64rr:%[0-9]+]]:gr64 = CVTTSD2SI64rr [[COPY1]]
-    ; CHECK: $rax = COPY [[CVTTSD2SI64rr]]
+    ; CHECK: %2:gr64 = nofpexcept CVTTSD2SI64rr [[COPY1]], implicit $mxcsr
+    ; CHECK: $rax = COPY %2
     ; CHECK: RET 0, implicit $rax
     %1:vecr(s128) = COPY $xmm0
     %0:vecr(s64) = G_TRUNC %1(s128)

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
index 4de8665e1774..0bb60610e06f 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-select-sitofp.mir
@@ -61,8 +61,8 @@ body:             |
     ; CHECK-LABEL: name: int32_to_float
     ; CHECK: liveins: $edi
     ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
-    ; CHECK: [[CVTSI2SSrr:%[0-9]+]]:fr32 = CVTSI2SSrr [[COPY]]
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[CVTSI2SSrr]]
+    ; CHECK: %1:fr32 = nofpexcept CVTSI2SSrr [[COPY]], implicit $mxcsr
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %1
     ; CHECK: $xmm0 = COPY [[COPY1]]
     ; CHECK: RET 0, implicit $xmm0
     %0:gpr(s32) = COPY $edi
@@ -89,8 +89,8 @@ body:             |
     ; CHECK-LABEL: name: int64_to_float
     ; CHECK: liveins: $rdi
     ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; CHECK: [[CVTSI642SSrr:%[0-9]+]]:fr32 = CVTSI642SSrr [[COPY]]
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[CVTSI642SSrr]]
+    ; CHECK: %1:fr32 = nofpexcept CVTSI642SSrr [[COPY]], implicit $mxcsr
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %1
     ; CHECK: $xmm0 = COPY [[COPY1]]
     ; CHECK: RET 0, implicit $xmm0
     %0:gpr(s64) = COPY $rdi
@@ -145,8 +145,8 @@ body:             |
     ; CHECK-LABEL: name: int64_to_double
     ; CHECK: liveins: $rdi
     ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
-    ; CHECK: [[CVTSI642SDrr:%[0-9]+]]:fr64 = CVTSI642SDrr [[COPY]]
-    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY [[CVTSI642SDrr]]
+    ; CHECK: %1:fr64 = nofpexcept CVTSI642SDrr [[COPY]], implicit $mxcsr
+    ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %1
     ; CHECK: $xmm0 = COPY [[COPY1]]
     ; CHECK: RET 0, implicit $xmm0
     %0:gpr(s64) = COPY $rdi


        

