[llvm] AMDGPU: Fix handling of -0 in round lowering (PR #65761)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 11 13:46:01 PDT 2023


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/65761:

>From 71f3a06843d653775cae47b8c6625726733f9a37 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Fri, 8 Sep 2023 13:06:37 +0300
Subject: [PATCH] AMDGPU: Fix handling of -0 in round lowering

Move the select of the offset into the copysign input, so we end up
with -0 + -0, which preserves the sign.

The GlobalISel output leaves something to be desired: it emits some
trivially foldable bit ops and also fails to form BFI.

Fixes #65629
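
As an illustration (not part of the patch), here is a scalar C++ sketch of
the old vs. new formulation, showing why moving the select into the copysign
input preserves the sign of zero. The round_old/round_new names are
hypothetical and exist only for comparison:

  #include <cmath>
  #include <cstdio>

  // Old lowering: o = copysign(1.0, x); result = t + (d >= 0.5 ? o : 0.0).
  // For x = -0.0: t = -0.0, d = +0.0, the select picks +0.0, and
  // -0.0 + 0.0 = +0.0 under IEEE 754, so the sign of zero is lost.
  static double round_old(double x) {
    double t = std::trunc(x);
    double d = std::fabs(x - t);
    double o = std::copysign(1.0, x);
    return t + (d >= 0.5 ? o : 0.0);
  }

  // New lowering: o = copysign(d >= 0.5 ? 1.0 : 0.0, x); result = t + o.
  // For x = -0.0: o = copysign(0.0, -0.0) = -0.0, and -0.0 + -0.0 = -0.0,
  // so the sign is preserved.
  static double round_new(double x) {
    double t = std::trunc(x);
    double d = std::fabs(x - t);
    double o = std::copysign(d >= 0.5 ? 1.0 : 0.0, x);
    return t + o;
  }

  int main() {
    std::printf("round_old(-0.0) = %g, round_new(-0.0) = %g\n",
                round_old(-0.0), round_new(-0.0));  // prints 0 and -0
  }
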
---
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |   20 +-
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |   10 +-
 .../GlobalISel/legalize-intrinsic-round.mir   |  858 ++--
 llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll |   81 +-
 llvm/test/CodeGen/AMDGPU/fneg-combines.ll     | 3792 +++++++++++++++++
 llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll |   24 +-
 llvm/test/CodeGen/AMDGPU/known-never-snan.ll  |   12 +-
 llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll    |  551 +--
 llvm/test/CodeGen/AMDGPU/llvm.round.ll        | 1083 +++--
 9 files changed, 5179 insertions(+), 1252 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index cfb95955d1f888b..5fcebf2b8df59cd 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -6575,23 +6575,25 @@ LegalizerHelper::lowerIntrinsicRound(MachineInstr &MI) {
   // round(x) =>
   //  t = trunc(x);
   //  d = fabs(x - t);
-  //  o = copysign(1.0f, x);
-  //  return t + (d >= 0.5 ? o : 0.0);
+  //  o = copysign(d >= 0.5 ? 1.0 : 0.0, x);
+  //  return t + o;
 
   auto T = MIRBuilder.buildIntrinsicTrunc(Ty, X, Flags);
 
   auto Diff = MIRBuilder.buildFSub(Ty, X, T, Flags);
   auto AbsDiff = MIRBuilder.buildFAbs(Ty, Diff, Flags);
-  auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
-  auto One = MIRBuilder.buildFConstant(Ty, 1.0);
+
   auto Half = MIRBuilder.buildFConstant(Ty, 0.5);
-  auto SignOne = MIRBuilder.buildFCopysign(Ty, One, X);
+  auto Cmp =
+      MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half, Flags);
 
-  auto Cmp = MIRBuilder.buildFCmp(CmpInst::FCMP_OGE, CondTy, AbsDiff, Half,
-                                  Flags);
-  auto Sel = MIRBuilder.buildSelect(Ty, Cmp, SignOne, Zero, Flags);
+  // Could emit G_UITOFP instead
+  auto One = MIRBuilder.buildFConstant(Ty, 1.0);
+  auto Zero = MIRBuilder.buildFConstant(Ty, 0.0);
+  auto BoolFP = MIRBuilder.buildSelect(Ty, Cmp, One, Zero);
+  auto SignedOffset = MIRBuilder.buildFCopysign(Ty, BoolFP, X);
 
-  MIRBuilder.buildFAdd(DstReg, T, Sel, Flags);
+  MIRBuilder.buildFAdd(DstReg, T, SignedOffset, Flags);
 
   MI.eraseFromParent();
   return Legalized;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d84a50975430514..02332a9fe9c0c40 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2429,18 +2429,16 @@ SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
 
   const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
   const SDValue One = DAG.getConstantFP(1.0, SL, VT);
-  const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
-
-  SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
 
   EVT SetCCVT =
       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
 
+  const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
+  SDValue OneOrZeroFP = DAG.getNode(ISD::SELECT, SL, VT, Cmp, One, Zero);
 
-  SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
-
-  return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
+  SDValue SignedOffset = DAG.getNode(ISD::FCOPYSIGN, SL, VT, OneOrZeroFP, X);
+  return DAG.getNode(ISD::FADD, SL, VT, T, SignedOffset);
 }
 
 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
index 1a70e500d36f54c..b97d04e809a6f7d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-intrinsic-round.mir
@@ -18,16 +18,19 @@ body: |
     ; GFX6-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
     ; GFX6-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[INTRINSIC_TRUNC]]
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[FSUB]]
-    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX6-NEXT: $vgpr0 = COPY [[FADD]](s32)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_s32
     ; GFX8: liveins: $vgpr0
     ; GFX8-NEXT: {{  $}}
@@ -35,16 +38,19 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: $vgpr0 = COPY [[FADD]](s32)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_s32
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
@@ -52,15 +58,17 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[COPY]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: $vgpr0 = COPY [[FADD]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = G_INTRINSIC_ROUND %0
@@ -80,16 +88,19 @@ body: |
     ; GFX6-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = nsz G_INTRINSIC_TRUNC [[COPY]]
     ; GFX6-NEXT: [[FSUB:%[0-9]+]]:_(s32) = nsz G_FSUB [[COPY]], [[INTRINSIC_TRUNC]]
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s32) = nsz G_FABS [[FSUB]]
-    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = nsz G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = nsz G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = nsz G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX6-NEXT: $vgpr0 = COPY [[FADD]](s32)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_s32_flags
     ; GFX8: liveins: $vgpr0
     ; GFX8-NEXT: {{  $}}
@@ -97,16 +108,19 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = nsz G_INTRINSIC_TRUNC [[COPY]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s32) = nsz G_FSUB [[COPY]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s32) = nsz G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = nsz G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s32) = nsz G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s32) = nsz G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: $vgpr0 = COPY [[FADD]](s32)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_s32_flags
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
@@ -114,15 +128,17 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = nsz G_INTRINSIC_TRUNC [[COPY]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s32) = nsz G_FSUB [[COPY]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s32) = nsz G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = nsz G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = nsz G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = nsz G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = nsz G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: $vgpr0 = COPY [[FADD]](s32)
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = nsz G_INTRINSIC_ROUND %0
@@ -162,16 +178,19 @@ body: |
     ; GFX6-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
     ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[FNEG]]
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
-    ; GFX6-NEXT: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; GFX6-NEXT: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
-    ; GFX6-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
-    ; GFX6-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 4607182418800017408
-    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C10]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[C11]], [[AND2]]
-    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C9]]
-    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C8]]
-    ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[SELECT2]]
+    ; GFX6-NEXT: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
+    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C8]]
+    ; GFX6-NEXT: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX6-NEXT: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[C9]], [[C10]]
+    ; GFX6-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; GFX6-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SELECT2]], [[C12]]
+    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C11]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND2]], [[AND3]]
+    ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[OR]]
     ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[FADD1]](s64)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_s64
     ; GFX8: liveins: $vgpr0_vgpr1
     ; GFX8-NEXT: {{  $}}
@@ -180,16 +199,19 @@ body: |
     ; GFX8-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[FNEG]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
-    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4607182418800017408
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[C3]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C1]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SELECT]], [[C4]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C3]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[FADD1]](s64)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_s64
     ; GFX9: liveins: $vgpr0_vgpr1
     ; GFX9-NEXT: {{  $}}
@@ -198,15 +220,17 @@ body: |
     ; GFX9-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[FNEG]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4607182418800017408
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C2]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[C3]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C1]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SELECT]], [[C4]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C3]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[FADD1]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = G_INTRINSIC_ROUND %0
@@ -227,25 +251,29 @@ body: |
     ; GFX6-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
     ; GFX6-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[INTRINSIC_TRUNC]]
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[FSUB]]
-    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C3]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX6-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
     ; GFX6-NEXT: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[INTRINSIC_TRUNC1]]
     ; GFX6-NEXT: [[FABS1:%[0-9]+]]:_(s32) = G_FABS [[FSUB1]]
-    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
-    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND1]]
-    ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s32), [[C1]]
-    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C]]
-    ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s32), [[C]]
+    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[C1]], [[C2]]
+    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT1]], [[C4]]
+    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C3]]
+    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[AND3]]
+    ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FADD]](s32), [[FADD1]](s32)
     ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_v2s32
     ; GFX8: liveins: $vgpr0_vgpr1
     ; GFX8-NEXT: {{  $}}
@@ -254,25 +282,29 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C3]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
     ; GFX8-NEXT: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[INTRINSIC_TRUNC1]]
     ; GFX8-NEXT: [[FABS1:%[0-9]+]]:_(s32) = G_FABS [[FSUB1]]
-    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
-    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND1]]
-    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s32), [[C1]]
-    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C]]
-    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s32), [[C]]
+    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT1]], [[C4]]
+    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C3]]
+    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[AND3]]
+    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FADD]](s32), [[FADD1]](s32)
     ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_v2s32
     ; GFX9: liveins: $vgpr0_vgpr1
     ; GFX9-NEXT: {{  $}}
@@ -281,23 +313,26 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[UV]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s32) = G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1065353216
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C2]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C1]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 5.000000e-01
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s32), [[C]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SELECT]], [[C4]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV]], [[C3]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s32) = G_INTRINSIC_TRUNC [[UV1]]
     ; GFX9-NEXT: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB [[UV1]], [[INTRINSIC_TRUNC1]]
     ; GFX9-NEXT: [[FABS1:%[0-9]+]]:_(s32) = G_FABS [[FSUB1]]
-    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C2]]
-    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[C3]], [[AND1]]
-    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s32), [[C1]]
-    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C]]
-    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s32), [[C]]
+    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s32) = G_AND [[SELECT1]], [[C4]]
+    ; GFX9-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[C3]]
+    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[AND3]]
+    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FADD]](s32), [[FADD1]](s32)
     ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
     %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
@@ -339,37 +374,41 @@ body: |
     ; GFX6-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[SELECT1]]
     ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG]]
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
-    ; GFX6-NEXT: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; GFX6-NEXT: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
-    ; GFX6-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
-    ; GFX6-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 4607182418800017408
-    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C10]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[C11]], [[AND2]]
-    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C9]]
-    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C8]]
-    ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[SELECT2]]
+    ; GFX6-NEXT: [[C8:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
+    ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C8]]
+    ; GFX6-NEXT: [[C9:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX6-NEXT: [[C10:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[C9]], [[C10]]
+    ; GFX6-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; GFX6-NEXT: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SELECT2]], [[C12]]
+    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C11]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND2]], [[AND3]]
+    ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[SELECT1]], [[OR]]
     ; GFX6-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
     ; GFX6-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.ubfe), [[UV5]](s32), [[C]](s32), [[C1]](s32)
     ; GFX6-NEXT: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[INT1]], [[C2]]
-    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
-    ; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND3]](s32)
+    ; GFX6-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[UV5]], [[C3]]
+    ; GFX6-NEXT: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C5]](s32), [[AND4]](s32)
     ; GFX6-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[C4]], [[SUB1]](s32)
     ; GFX6-NEXT: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[ASHR1]], [[C6]]
-    ; GFX6-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
+    ; GFX6-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[XOR1]]
     ; GFX6-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SUB1]](s32), [[C5]]
     ; GFX6-NEXT: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SUB1]](s32), [[C7]]
-    ; GFX6-NEXT: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND4]]
+    ; GFX6-NEXT: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[MV1]], [[AND5]]
     ; GFX6-NEXT: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV1]], [[SELECT3]]
     ; GFX6-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[SELECT4]]
     ; GFX6-NEXT: [[FADD2:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG1]]
     ; GFX6-NEXT: [[FABS1:%[0-9]+]]:_(s64) = G_FABS [[FADD2]]
-    ; GFX6-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C10]]
-    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[C11]], [[AND5]]
-    ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C9]]
-    ; GFX6-NEXT: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C8]]
-    ; GFX6-NEXT: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[SELECT4]], [[SELECT5]]
+    ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C8]]
+    ; GFX6-NEXT: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[C9]], [[C10]]
+    ; GFX6-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SELECT5]], [[C12]]
+    ; GFX6-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C11]]
+    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[AND6]], [[AND7]]
+    ; GFX6-NEXT: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[SELECT4]], [[OR1]]
     ; GFX6-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD1]](s64), [[FADD3]](s64)
     ; GFX6-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_v2s64
     ; GFX8: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX8-NEXT: {{  $}}
@@ -379,26 +418,30 @@ body: |
     ; GFX8-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
-    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4607182418800017408
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C2]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[C3]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C1]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SELECT]], [[C4]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C3]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
     ; GFX8-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC1]]
     ; GFX8-NEXT: [[FADD2:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG1]]
     ; GFX8-NEXT: [[FABS1:%[0-9]+]]:_(s64) = G_FABS [[FADD2]]
-    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C2]]
-    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[C3]], [[AND1]]
-    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C1]]
-    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C]]
-    ; GFX8-NEXT: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C]]
+    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SELECT1]], [[C4]]
+    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C3]]
+    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[AND2]], [[AND3]]
+    ; GFX8-NEXT: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX8-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD1]](s64), [[FADD3]](s64)
     ; GFX8-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_v2s64
     ; GFX9: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
     ; GFX9-NEXT: {{  $}}
@@ -408,24 +451,27 @@ body: |
     ; GFX9-NEXT: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[UV]], [[FNEG]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[FADD]]
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 4607182418800017408
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C2]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[C3]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C1]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 5.000000e-01
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s64), [[C]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 9223372036854775807
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SELECT]], [[C4]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C3]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s64) = G_INTRINSIC_TRUNC [[UV1]]
     ; GFX9-NEXT: [[FNEG1:%[0-9]+]]:_(s64) = G_FNEG [[INTRINSIC_TRUNC1]]
     ; GFX9-NEXT: [[FADD2:%[0-9]+]]:_(s64) = G_FADD [[UV1]], [[FNEG1]]
     ; GFX9-NEXT: [[FABS1:%[0-9]+]]:_(s64) = G_FABS [[FADD2]]
-    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C2]]
-    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[C3]], [[AND1]]
-    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C1]]
-    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C]]
-    ; GFX9-NEXT: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s64), [[C]]
+    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[FCMP1]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SELECT1]], [[C4]]
+    ; GFX9-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C3]]
+    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s64) = G_OR [[AND2]], [[AND3]]
+    ; GFX9-NEXT: [[FADD3:%[0-9]+]]:_(s64) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[FADD1]](s64), [[FADD3]](s64)
     ; GFX9-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
     %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
@@ -453,22 +499,25 @@ body: |
     ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT1]], [[FPEXT2]]
     ; GFX6-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC1]]
-    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C2]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C3]], [[AND]]
+    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
     ; GFX6-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[FABS]](s16)
-    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
+    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C]](s16)
     ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT3]](s32), [[FPEXT4]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C4]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
-    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT]](s16)
+    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[OR]](s16)
     ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT5]], [[FPEXT6]]
     ; GFX6-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
     ; GFX6-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FPTRUNC2]](s16)
     ; GFX6-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_s16
     ; GFX8: liveins: $vgpr0
     ; GFX8-NEXT: {{  $}}
@@ -477,17 +526,20 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C2]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C3]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C4]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX8-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_s16
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
@@ -496,15 +548,17 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C2]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C3]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C1]], [[C2]]
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C4]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
     %0:_(s32) = COPY $vgpr0
@@ -538,18 +592,20 @@ body: |
     ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT1]], [[FPEXT2]]
     ; GFX6-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC1]]
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
     ; GFX6-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[FABS]](s16)
-    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT3]](s32), [[FPEXT4]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX6-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
-    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT]](s16)
+    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[OR]](s16)
     ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT5]], [[FPEXT6]]
     ; GFX6-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
     ; GFX6-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
@@ -561,14 +617,15 @@ body: |
     ; GFX6-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FPEXT8]], [[FPEXT9]]
     ; GFX6-NEXT: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
     ; GFX6-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC4]]
-    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[FABS1]](s16)
-    ; GFX6-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT10]](s32), [[FPEXT11]]
-    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
+    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
     ; GFX6-NEXT: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC3]](s16)
-    ; GFX6-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT1]](s16)
+    ; GFX6-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[OR1]](s16)
     ; GFX6-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[FPEXT12]], [[FPEXT13]]
     ; GFX6-NEXT: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
     ; GFX6-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC2]](s16)
@@ -577,6 +634,7 @@ body: |
     ; GFX6-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
     ; GFX6-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
     ; GFX6-NEXT: $vgpr0 = COPY [[BITCAST1]](<2 x s16>)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_v2s16
     ; GFX8: liveins: $vgpr0
     ; GFX8-NEXT: {{  $}}
@@ -589,29 +647,33 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX8-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC1]]
     ; GFX8-NEXT: [[FSUB1:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC1]], [[INTRINSIC_TRUNC1]]
     ; GFX8-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FSUB1]]
-    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
-    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
-    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C1]]
+    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
+    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FADD]](s16)
     ; GFX8-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FADD1]](s16)
     ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
     ; GFX8-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
     ; GFX8-NEXT: [[BITCAST1:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR2]](s32)
     ; GFX8-NEXT: $vgpr0 = COPY [[BITCAST1]](<2 x s16>)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_v2s16
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
@@ -624,23 +686,26 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX9-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC1]]
     ; GFX9-NEXT: [[FSUB1:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC1]], [[INTRINSIC_TRUNC1]]
     ; GFX9-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FSUB1]]
-    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
-    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
-    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C1]]
+    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX9-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
+    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FADD]](s16), [[FADD1]](s16)
     ; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
     %0:_(<2 x s16>) = COPY $vgpr0
@@ -674,18 +739,20 @@ body: |
     ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT1]], [[FPEXT2]]
     ; GFX6-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC1]]
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
     ; GFX6-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[FABS]](s16)
-    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT3]](s32), [[FPEXT4]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX6-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
-    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT]](s16)
+    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[OR]](s16)
     ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT5]], [[FPEXT6]]
     ; GFX6-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
     ; GFX6-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
@@ -697,14 +764,15 @@ body: |
     ; GFX6-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FPEXT8]], [[FPEXT9]]
     ; GFX6-NEXT: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
     ; GFX6-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC4]]
-    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[FABS1]](s16)
-    ; GFX6-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT10]](s32), [[FPEXT11]]
-    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
+    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
     ; GFX6-NEXT: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC3]](s16)
-    ; GFX6-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT1]](s16)
+    ; GFX6-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[OR1]](s16)
     ; GFX6-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[FPEXT12]], [[FPEXT13]]
     ; GFX6-NEXT: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
     ; GFX6-NEXT: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
@@ -716,14 +784,15 @@ body: |
     ; GFX6-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FPEXT15]], [[FPEXT16]]
     ; GFX6-NEXT: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD4]](s32)
     ; GFX6-NEXT: [[FABS2:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC7]]
-    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
-    ; GFX6-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND2]]
     ; GFX6-NEXT: [[FPEXT17:%[0-9]+]]:_(s32) = G_FPEXT [[FABS2]](s16)
-    ; GFX6-NEXT: [[FPEXT18:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT18:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT17]](s32), [[FPEXT18]]
-    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[OR2]], [[C1]]
+    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[SELECT2]], [[C5]]
+    ; GFX6-NEXT: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C4]]
+    ; GFX6-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[AND5]]
     ; GFX6-NEXT: [[FPEXT19:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC6]](s16)
-    ; GFX6-NEXT: [[FPEXT20:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT2]](s16)
+    ; GFX6-NEXT: [[FPEXT20:%[0-9]+]]:_(s32) = G_FPEXT [[OR2]](s16)
     ; GFX6-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FPEXT19]], [[FPEXT20]]
     ; GFX6-NEXT: [[FPTRUNC8:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD5]](s32)
     ; GFX6-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -737,18 +806,19 @@ body: |
     ; GFX6-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
     ; GFX6-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR3]](s32)
     ; GFX6-NEXT: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC8]](s16)
-    ; GFX6-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
-    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[BITCAST2]], [[C5]]
-    ; GFX6-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C]](s32)
+    ; GFX6-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; GFX6-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[BITCAST2]], [[C6]]
+    ; GFX6-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND6]], [[C]](s32)
     ; GFX6-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
     ; GFX6-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR4]](s32)
-    ; GFX6-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C5]]
-    ; GFX6-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[BITCAST3]], [[C5]]
-    ; GFX6-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C]](s32)
-    ; GFX6-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
+    ; GFX6-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C6]]
+    ; GFX6-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[BITCAST3]], [[C6]]
+    ; GFX6-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND8]], [[C]](s32)
+    ; GFX6-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[AND7]], [[SHL2]]
     ; GFX6-NEXT: [[BITCAST6:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR5]](s32)
     ; GFX6-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>), [[BITCAST6]](<2 x s16>)
     ; GFX6-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_v3s16
     ; GFX8: liveins: $vgpr0_vgpr1_vgpr2
     ; GFX8-NEXT: {{  $}}
@@ -764,31 +834,35 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX8-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC1]]
     ; GFX8-NEXT: [[FSUB1:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC1]], [[INTRINSIC_TRUNC1]]
     ; GFX8-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FSUB1]]
-    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
-    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
-    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C1]]
+    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
+    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC2:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC2]]
     ; GFX8-NEXT: [[FSUB2:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC2]], [[INTRINSIC_TRUNC2]]
     ; GFX8-NEXT: [[FABS2:%[0-9]+]]:_(s16) = G_FABS [[FSUB2]]
-    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
-    ; GFX8-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND2]]
-    ; GFX8-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[OR2]], [[C1]]
-    ; GFX8-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[SELECT2]]
+    ; GFX8-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C1]]
+    ; GFX8-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[SELECT2]], [[C5]]
+    ; GFX8-NEXT: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C4]]
+    ; GFX8-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[AND5]]
+    ; GFX8-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[OR2]]
     ; GFX8-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; GFX8-NEXT: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
     ; GFX8-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
@@ -800,18 +874,19 @@ body: |
     ; GFX8-NEXT: [[OR3:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
     ; GFX8-NEXT: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR3]](s32)
     ; GFX8-NEXT: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[FADD2]](s16)
-    ; GFX8-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
-    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s32) = G_AND [[BITCAST2]], [[C5]]
-    ; GFX8-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C]](s32)
+    ; GFX8-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+    ; GFX8-NEXT: [[AND6:%[0-9]+]]:_(s32) = G_AND [[BITCAST2]], [[C6]]
+    ; GFX8-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND6]], [[C]](s32)
     ; GFX8-NEXT: [[OR4:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
     ; GFX8-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR4]](s32)
-    ; GFX8-NEXT: [[AND4:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C5]]
-    ; GFX8-NEXT: [[AND5:%[0-9]+]]:_(s32) = G_AND [[BITCAST3]], [[C5]]
-    ; GFX8-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND5]], [[C]](s32)
-    ; GFX8-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[AND4]], [[SHL2]]
+    ; GFX8-NEXT: [[AND7:%[0-9]+]]:_(s32) = G_AND [[LSHR1]], [[C6]]
+    ; GFX8-NEXT: [[AND8:%[0-9]+]]:_(s32) = G_AND [[BITCAST3]], [[C6]]
+    ; GFX8-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND8]], [[C]](s32)
+    ; GFX8-NEXT: [[OR5:%[0-9]+]]:_(s32) = G_OR [[AND7]], [[SHL2]]
     ; GFX8-NEXT: [[BITCAST6:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR5]](s32)
     ; GFX8-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>), [[BITCAST6]](<2 x s16>)
     ; GFX8-NEXT: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_v3s16
     ; GFX9: liveins: $vgpr0_vgpr1_vgpr2
     ; GFX9-NEXT: {{  $}}
@@ -827,31 +902,35 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX9-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC1]]
     ; GFX9-NEXT: [[FSUB1:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC1]], [[INTRINSIC_TRUNC1]]
     ; GFX9-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FSUB1]]
-    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
-    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
-    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C1]]
+    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX9-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
+    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC2:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC2]]
     ; GFX9-NEXT: [[FSUB2:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC2]], [[INTRINSIC_TRUNC2]]
     ; GFX9-NEXT: [[FABS2:%[0-9]+]]:_(s16) = G_FABS [[FSUB2]]
-    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
-    ; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND2]]
-    ; GFX9-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[OR2]], [[C1]]
-    ; GFX9-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[SELECT2]]
+    ; GFX9-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C1]]
+    ; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[SELECT2]], [[C5]]
+    ; GFX9-NEXT: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C4]]
+    ; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[AND5]]
+    ; GFX9-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[OR2]]
     ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
     ; GFX9-NEXT: [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[DEF]](<4 x s16>)
     ; GFX9-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
@@ -902,18 +981,20 @@ body: |
     ; GFX6-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FPEXT1]], [[FPEXT2]]
     ; GFX6-NEXT: [[FPTRUNC1:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD]](s32)
     ; GFX6-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC1]]
-    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
+    ; GFX6-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
     ; GFX6-NEXT: [[FPEXT3:%[0-9]+]]:_(s32) = G_FPEXT [[FABS]](s16)
-    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT4:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT3]](s32), [[FPEXT4]]
-    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
+    ; GFX6-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX6-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX6-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX6-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX6-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX6-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT5:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC]](s16)
-    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT]](s16)
+    ; GFX6-NEXT: [[FPEXT6:%[0-9]+]]:_(s32) = G_FPEXT [[OR]](s16)
     ; GFX6-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD [[FPEXT5]], [[FPEXT6]]
     ; GFX6-NEXT: [[FPTRUNC2:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD1]](s32)
     ; GFX6-NEXT: [[FPEXT7:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC1]](s16)
@@ -925,14 +1006,15 @@ body: |
     ; GFX6-NEXT: [[FADD2:%[0-9]+]]:_(s32) = G_FADD [[FPEXT8]], [[FPEXT9]]
     ; GFX6-NEXT: [[FPTRUNC4:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD2]](s32)
     ; GFX6-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC4]]
-    ; GFX6-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
     ; GFX6-NEXT: [[FPEXT10:%[0-9]+]]:_(s32) = G_FPEXT [[FABS1]](s16)
-    ; GFX6-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT11:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT10]](s32), [[FPEXT11]]
-    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
+    ; GFX6-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX6-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
     ; GFX6-NEXT: [[FPEXT12:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC3]](s16)
-    ; GFX6-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT1]](s16)
+    ; GFX6-NEXT: [[FPEXT13:%[0-9]+]]:_(s32) = G_FPEXT [[OR1]](s16)
     ; GFX6-NEXT: [[FADD3:%[0-9]+]]:_(s32) = G_FADD [[FPEXT12]], [[FPEXT13]]
     ; GFX6-NEXT: [[FPTRUNC5:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD3]](s32)
     ; GFX6-NEXT: [[FPEXT14:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC2]](s16)
@@ -944,14 +1026,15 @@ body: |
     ; GFX6-NEXT: [[FADD4:%[0-9]+]]:_(s32) = G_FADD [[FPEXT15]], [[FPEXT16]]
     ; GFX6-NEXT: [[FPTRUNC7:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD4]](s32)
     ; GFX6-NEXT: [[FABS2:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC7]]
-    ; GFX6-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
-    ; GFX6-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND2]]
     ; GFX6-NEXT: [[FPEXT17:%[0-9]+]]:_(s32) = G_FPEXT [[FABS2]](s16)
-    ; GFX6-NEXT: [[FPEXT18:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT18:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT17]](s32), [[FPEXT18]]
-    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[OR2]], [[C1]]
+    ; GFX6-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[SELECT2]], [[C5]]
+    ; GFX6-NEXT: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C4]]
+    ; GFX6-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[AND5]]
     ; GFX6-NEXT: [[FPEXT19:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC6]](s16)
-    ; GFX6-NEXT: [[FPEXT20:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT2]](s16)
+    ; GFX6-NEXT: [[FPEXT20:%[0-9]+]]:_(s32) = G_FPEXT [[OR2]](s16)
     ; GFX6-NEXT: [[FADD5:%[0-9]+]]:_(s32) = G_FADD [[FPEXT19]], [[FPEXT20]]
     ; GFX6-NEXT: [[FPTRUNC8:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD5]](s32)
     ; GFX6-NEXT: [[FPEXT21:%[0-9]+]]:_(s32) = G_FPEXT [[TRUNC3]](s16)
@@ -963,14 +1046,15 @@ body: |
     ; GFX6-NEXT: [[FADD6:%[0-9]+]]:_(s32) = G_FADD [[FPEXT22]], [[FPEXT23]]
     ; GFX6-NEXT: [[FPTRUNC10:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD6]](s32)
     ; GFX6-NEXT: [[FABS3:%[0-9]+]]:_(s16) = G_FABS [[FPTRUNC10]]
-    ; GFX6-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]]
-    ; GFX6-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND3]]
     ; GFX6-NEXT: [[FPEXT24:%[0-9]+]]:_(s32) = G_FPEXT [[FABS3]](s16)
-    ; GFX6-NEXT: [[FPEXT25:%[0-9]+]]:_(s32) = G_FPEXT [[C2]](s16)
+    ; GFX6-NEXT: [[FPEXT25:%[0-9]+]]:_(s32) = G_FPEXT [[C1]](s16)
     ; GFX6-NEXT: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FPEXT24]](s32), [[FPEXT25]]
-    ; GFX6-NEXT: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[FCMP3]](s1), [[OR3]], [[C1]]
+    ; GFX6-NEXT: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[FCMP3]](s1), [[C2]], [[C3]]
+    ; GFX6-NEXT: [[AND6:%[0-9]+]]:_(s16) = G_AND [[SELECT3]], [[C5]]
+    ; GFX6-NEXT: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C4]]
+    ; GFX6-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND6]], [[AND7]]
     ; GFX6-NEXT: [[FPEXT26:%[0-9]+]]:_(s32) = G_FPEXT [[FPTRUNC9]](s16)
-    ; GFX6-NEXT: [[FPEXT27:%[0-9]+]]:_(s32) = G_FPEXT [[SELECT3]](s16)
+    ; GFX6-NEXT: [[FPEXT27:%[0-9]+]]:_(s32) = G_FPEXT [[OR3]](s16)
     ; GFX6-NEXT: [[FADD7:%[0-9]+]]:_(s32) = G_FADD [[FPEXT26]], [[FPEXT27]]
     ; GFX6-NEXT: [[FPTRUNC11:%[0-9]+]]:_(s16) = G_FPTRUNC [[FADD7]](s32)
     ; GFX6-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FPTRUNC2]](s16)
@@ -985,6 +1069,7 @@ body: |
     ; GFX6-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR5]](s32)
     ; GFX6-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST2]](<2 x s16>), [[BITCAST3]](<2 x s16>)
     ; GFX6-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+    ;
     ; GFX8-LABEL: name: test_intrinsic_round_v4s16
     ; GFX8: liveins: $vgpr0_vgpr1
     ; GFX8-NEXT: {{  $}}
@@ -1002,39 +1087,44 @@ body: |
     ; GFX8-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX8-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX8-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
-    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
-    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX8-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX8-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
+    ; GFX8-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX8-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX8-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX8-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX8-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX8-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX8-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC1]]
     ; GFX8-NEXT: [[FSUB1:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC1]], [[INTRINSIC_TRUNC1]]
     ; GFX8-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FSUB1]]
-    ; GFX8-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
-    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
-    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX8-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C1]]
+    ; GFX8-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX8-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
+    ; GFX8-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC2:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC2]]
     ; GFX8-NEXT: [[FSUB2:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC2]], [[INTRINSIC_TRUNC2]]
     ; GFX8-NEXT: [[FABS2:%[0-9]+]]:_(s16) = G_FABS [[FSUB2]]
-    ; GFX8-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
-    ; GFX8-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND2]]
-    ; GFX8-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[OR2]], [[C1]]
-    ; GFX8-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[SELECT2]]
+    ; GFX8-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C1]]
+    ; GFX8-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[SELECT2]], [[C5]]
+    ; GFX8-NEXT: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C4]]
+    ; GFX8-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[AND5]]
+    ; GFX8-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[OR2]]
     ; GFX8-NEXT: [[INTRINSIC_TRUNC3:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC3]]
     ; GFX8-NEXT: [[FSUB3:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC3]], [[INTRINSIC_TRUNC3]]
     ; GFX8-NEXT: [[FABS3:%[0-9]+]]:_(s16) = G_FABS [[FSUB3]]
-    ; GFX8-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]]
-    ; GFX8-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND3]]
-    ; GFX8-NEXT: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS3]](s16), [[C2]]
-    ; GFX8-NEXT: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[FCMP3]](s1), [[OR3]], [[C1]]
-    ; GFX8-NEXT: [[FADD3:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC3]], [[SELECT3]]
+    ; GFX8-NEXT: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS3]](s16), [[C1]]
+    ; GFX8-NEXT: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[FCMP3]](s1), [[C2]], [[C3]]
+    ; GFX8-NEXT: [[AND6:%[0-9]+]]:_(s16) = G_AND [[SELECT3]], [[C5]]
+    ; GFX8-NEXT: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C4]]
+    ; GFX8-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND6]], [[AND7]]
+    ; GFX8-NEXT: [[FADD3:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC3]], [[OR3]]
     ; GFX8-NEXT: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FADD]](s16)
     ; GFX8-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[FADD1]](s16)
     ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
@@ -1047,6 +1137,7 @@ body: |
     ; GFX8-NEXT: [[BITCAST3:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR5]](s32)
     ; GFX8-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST2]](<2 x s16>), [[BITCAST3]](<2 x s16>)
     ; GFX8-NEXT: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
+    ;
     ; GFX9-LABEL: name: test_intrinsic_round_v4s16
     ; GFX9: liveins: $vgpr0_vgpr1
     ; GFX9-NEXT: {{  $}}
@@ -1064,39 +1155,44 @@ body: |
     ; GFX9-NEXT: [[INTRINSIC_TRUNC:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC]]
     ; GFX9-NEXT: [[FSUB:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC]], [[INTRINSIC_TRUNC]]
     ; GFX9-NEXT: [[FABS:%[0-9]+]]:_(s16) = G_FABS [[FSUB]]
-    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
-    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
-    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
-    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 15360
-    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
-    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND]]
-    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[OR]], [[C1]]
-    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[SELECT]]
+    ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3800
+    ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS]](s16), [[C1]]
+    ; GFX9-NEXT: [[C2:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+    ; GFX9-NEXT: [[C3:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
+    ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+    ; GFX9-NEXT: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+    ; GFX9-NEXT: [[AND:%[0-9]+]]:_(s16) = G_AND [[SELECT]], [[C5]]
+    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+    ; GFX9-NEXT: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[AND1]]
+    ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC]], [[OR]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC1:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC1]]
     ; GFX9-NEXT: [[FSUB1:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC1]], [[INTRINSIC_TRUNC1]]
     ; GFX9-NEXT: [[FABS1:%[0-9]+]]:_(s16) = G_FABS [[FSUB1]]
-    ; GFX9-NEXT: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C3]]
-    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND1]]
-    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[OR1]], [[C1]]
-    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[SELECT1]]
+    ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS1]](s16), [[C1]]
+    ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[SELECT1]], [[C5]]
+    ; GFX9-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C4]]
+    ; GFX9-NEXT: [[OR1:%[0-9]+]]:_(s16) = G_OR [[AND2]], [[AND3]]
+    ; GFX9-NEXT: [[FADD1:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC1]], [[OR1]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC2:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC2]]
     ; GFX9-NEXT: [[FSUB2:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC2]], [[INTRINSIC_TRUNC2]]
     ; GFX9-NEXT: [[FABS2:%[0-9]+]]:_(s16) = G_FABS [[FSUB2]]
-    ; GFX9-NEXT: [[AND2:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C3]]
-    ; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND2]]
-    ; GFX9-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[OR2]], [[C1]]
-    ; GFX9-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[SELECT2]]
+    ; GFX9-NEXT: [[FCMP2:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS2]](s16), [[C1]]
+    ; GFX9-NEXT: [[SELECT2:%[0-9]+]]:_(s16) = G_SELECT [[FCMP2]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[AND4:%[0-9]+]]:_(s16) = G_AND [[SELECT2]], [[C5]]
+    ; GFX9-NEXT: [[AND5:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C4]]
+    ; GFX9-NEXT: [[OR2:%[0-9]+]]:_(s16) = G_OR [[AND4]], [[AND5]]
+    ; GFX9-NEXT: [[FADD2:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC2]], [[OR2]]
     ; GFX9-NEXT: [[INTRINSIC_TRUNC3:%[0-9]+]]:_(s16) = G_INTRINSIC_TRUNC [[TRUNC3]]
     ; GFX9-NEXT: [[FSUB3:%[0-9]+]]:_(s16) = G_FSUB [[TRUNC3]], [[INTRINSIC_TRUNC3]]
     ; GFX9-NEXT: [[FABS3:%[0-9]+]]:_(s16) = G_FABS [[FSUB3]]
-    ; GFX9-NEXT: [[AND3:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C3]]
-    ; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[C4]], [[AND3]]
-    ; GFX9-NEXT: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS3]](s16), [[C2]]
-    ; GFX9-NEXT: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[FCMP3]](s1), [[OR3]], [[C1]]
-    ; GFX9-NEXT: [[FADD3:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC3]], [[SELECT3]]
+    ; GFX9-NEXT: [[FCMP3:%[0-9]+]]:_(s1) = G_FCMP floatpred(oge), [[FABS3]](s16), [[C1]]
+    ; GFX9-NEXT: [[SELECT3:%[0-9]+]]:_(s16) = G_SELECT [[FCMP3]](s1), [[C2]], [[C3]]
+    ; GFX9-NEXT: [[AND6:%[0-9]+]]:_(s16) = G_AND [[SELECT3]], [[C5]]
+    ; GFX9-NEXT: [[AND7:%[0-9]+]]:_(s16) = G_AND [[TRUNC3]], [[C4]]
+    ; GFX9-NEXT: [[OR3:%[0-9]+]]:_(s16) = G_OR [[AND6]], [[AND7]]
+    ; GFX9-NEXT: [[FADD3:%[0-9]+]]:_(s16) = G_FADD [[INTRINSIC_TRUNC3]], [[OR3]]
     ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FADD]](s16), [[FADD1]](s16)
     ; GFX9-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[FADD2]](s16), [[FADD3]](s16)
     ; GFX9-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<2 x s16>), [[BUILD_VECTOR1]](<2 x s16>)
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
index e324b27f3f4ba07..592be64e1e31429 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.f16.ll
@@ -3794,14 +3794,14 @@ define half @v_fneg_round_f16(half %a) #0 {
 ; SI-SAFE:       ; %bb.0:
 ; SI-SAFE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-SAFE-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT:    s_brev_b32 s4, -2
 ; SI-SAFE-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT:    v_trunc_f32_e32 v2, v0
-; SI-SAFE-NEXT:    v_bfi_b32 v1, s4, 1.0, v0
-; SI-SAFE-NEXT:    v_sub_f32_e32 v0, v0, v2
-; SI-SAFE-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; SI-SAFE-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SAFE-NEXT:    v_add_f32_e32 v0, v2, v0
+; SI-SAFE-NEXT:    v_trunc_f32_e32 v1, v0
+; SI-SAFE-NEXT:    v_sub_f32_e32 v2, v0, v1
+; SI-SAFE-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, 0.5
+; SI-SAFE-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[4:5]
+; SI-SAFE-NEXT:    s_brev_b32 s4, -2
+; SI-SAFE-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; SI-SAFE-NEXT:    v_add_f32_e32 v0, v1, v0
 ; SI-SAFE-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; SI-SAFE-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -3809,56 +3809,55 @@ define half @v_fneg_round_f16(half %a) #0 {
 ; SI-NSZ:       ; %bb.0:
 ; SI-NSZ-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; SI-NSZ-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; SI-NSZ-NEXT:    s_brev_b32 s4, -2
 ; SI-NSZ-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SI-NSZ-NEXT:    v_trunc_f32_e32 v2, v0
-; SI-NSZ-NEXT:    v_bfi_b32 v1, s4, 1.0, v0
-; SI-NSZ-NEXT:    v_sub_f32_e32 v0, v0, v2
-; SI-NSZ-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; SI-NSZ-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-NSZ-NEXT:    v_sub_f32_e64 v0, -v2, v0
+; SI-NSZ-NEXT:    v_trunc_f32_e32 v1, v0
+; SI-NSZ-NEXT:    v_sub_f32_e32 v2, v0, v1
+; SI-NSZ-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, 0.5
+; SI-NSZ-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[4:5]
+; SI-NSZ-NEXT:    s_brev_b32 s4, -2
+; SI-NSZ-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; SI-NSZ-NEXT:    v_sub_f32_e64 v0, -v1, v0
 ; SI-NSZ-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-SAFE-LABEL: v_fneg_round_f16:
 ; VI-SAFE:       ; %bb.0:
 ; VI-SAFE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-SAFE-NEXT:    v_trunc_f16_e32 v1, v0
+; VI-SAFE-NEXT:    v_sub_f16_e32 v2, v0, v1
+; VI-SAFE-NEXT:    v_mov_b32_e32 v3, 0x3c00
+; VI-SAFE-NEXT:    v_cmp_ge_f16_e64 vcc, |v2|, 0.5
+; VI-SAFE-NEXT:    v_cndmask_b32_e32 v2, 0, v3, vcc
 ; VI-SAFE-NEXT:    s_movk_i32 s4, 0x7fff
-; VI-SAFE-NEXT:    v_mov_b32_e32 v1, 0x3c00
-; VI-SAFE-NEXT:    v_trunc_f16_e32 v2, v0
-; VI-SAFE-NEXT:    v_bfi_b32 v1, s4, v1, v0
-; VI-SAFE-NEXT:    v_sub_f16_e32 v0, v0, v2
-; VI-SAFE-NEXT:    v_cmp_ge_f16_e64 vcc, |v0|, 0.5
-; VI-SAFE-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; VI-SAFE-NEXT:    v_add_f16_e32 v0, v2, v0
+; VI-SAFE-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; VI-SAFE-NEXT:    v_add_f16_e32 v0, v1, v0
 ; VI-SAFE-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
 ; VI-SAFE-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-NSZ-LABEL: v_fneg_round_f16:
 ; VI-NSZ:       ; %bb.0:
 ; VI-NSZ-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NSZ-NEXT:    v_trunc_f16_e32 v1, v0
+; VI-NSZ-NEXT:    v_sub_f16_e32 v2, v0, v1
+; VI-NSZ-NEXT:    v_mov_b32_e32 v3, 0x3c00
+; VI-NSZ-NEXT:    v_cmp_ge_f16_e64 vcc, |v2|, 0.5
+; VI-NSZ-NEXT:    v_cndmask_b32_e32 v2, 0, v3, vcc
 ; VI-NSZ-NEXT:    s_movk_i32 s4, 0x7fff
-; VI-NSZ-NEXT:    v_mov_b32_e32 v1, 0x3c00
-; VI-NSZ-NEXT:    v_trunc_f16_e32 v2, v0
-; VI-NSZ-NEXT:    v_bfi_b32 v1, s4, v1, v0
-; VI-NSZ-NEXT:    v_sub_f16_e32 v0, v0, v2
-; VI-NSZ-NEXT:    v_cmp_ge_f16_e64 vcc, |v0|, 0.5
-; VI-NSZ-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; VI-NSZ-NEXT:    v_sub_f16_e64 v0, -v2, v0
+; VI-NSZ-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; VI-NSZ-NEXT:    v_sub_f16_e64 v0, -v1, v0
 ; VI-NSZ-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-SAFE-LABEL: v_fneg_round_f16:
 ; GFX11-SAFE:       ; %bb.0:
 ; GFX11-SAFE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-SAFE-NEXT:    v_trunc_f16_e32 v1, v0
-; GFX11-SAFE-NEXT:    s_movk_i32 s0, 0x3c00
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_sub_f16_e32 v2, v0, v1
-; GFX11-SAFE-NEXT:    v_bfi_b32 v0, 0x7fff, s0, v0
-; GFX11-SAFE-NEXT:    v_cmp_ge_f16_e64 vcc_lo, |v2|, 0.5
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-SAFE-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX11-SAFE-NEXT:    v_cmp_ge_f16_e64 s0, |v2|, 0.5
+; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SAFE-NEXT:    v_cndmask_b32_e64 v2, 0, 0x3c00, s0
+; GFX11-SAFE-NEXT:    v_bfi_b32 v0, 0x7fff, v2, v0
+; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_add_f16_e32 v0, v1, v0
-; GFX11-SAFE-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-SAFE-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
 ; GFX11-SAFE-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -3866,13 +3865,13 @@ define half @v_fneg_round_f16(half %a) #0 {
 ; GFX11-NSZ:       ; %bb.0:
 ; GFX11-NSZ-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX11-NSZ-NEXT:    v_trunc_f16_e32 v1, v0
-; GFX11-NSZ-NEXT:    s_movk_i32 s0, 0x3c00
-; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NSZ-NEXT:    v_sub_f16_e32 v2, v0, v1
-; GFX11-NSZ-NEXT:    v_bfi_b32 v0, 0x7fff, s0, v0
-; GFX11-NSZ-NEXT:    v_cmp_ge_f16_e64 vcc_lo, |v2|, 0.5
-; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NSZ-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc_lo
+; GFX11-NSZ-NEXT:    v_cmp_ge_f16_e64 s0, |v2|, 0.5
+; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NSZ-NEXT:    v_cndmask_b32_e64 v2, 0, 0x3c00, s0
+; GFX11-NSZ-NEXT:    v_bfi_b32 v0, 0x7fff, v2, v0
+; GFX11-NSZ-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NSZ-NEXT:    v_sub_f16_e64 v0, -v1, v0
 ; GFX11-NSZ-NEXT:    s_setpc_b64 s[30:31]
   %round = call half @llvm.round.f16(half %a)
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index 05e980c91516e9f..8e56c34bcfc39eb 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
 ; RUN: llc -march=amdgcn -mcpu=hawaii -start-before=amdgpu-unify-divergent-exit-nodes -mattr=+flat-for-global < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,SI %s
 ; RUN: llc -enable-no-signed-zeros-fp-math -march=amdgcn -mcpu=hawaii -mattr=+flat-for-global -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,SI %s
 
@@ -41,6 +42,57 @@ define amdgpu_kernel void @v_fneg_add_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_add_store_use_add_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_add_store_use_add_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_f32_e32 v2, v4, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_add_store_use_add_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_f32_e32 v2, v4, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -257,6 +309,17 @@ define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i
 ; GCN-NSZ-DAG: v_cmp_nlt_f32_e32 {{.*}}, 0
 ; GCN-NSZ-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, [[C]], 0,
 define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #2 {
+; GCN-NSZ-LABEL: fneg_fadd_0_nsz:
+; GCN-NSZ:       ; %bb.0: ; %.entry
+; GCN-NSZ-NEXT:    v_rcp_f32_e32 v0, s1
+; GCN-NSZ-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NSZ-NEXT:    v_mul_f32_e32 v0, 0, v0
+; GCN-NSZ-NEXT:    v_cmp_ngt_f32_e32 vcc, s0, v0
+; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v0, -v0, v1, vcc
+; GCN-NSZ-NEXT:    v_mov_b32_e32 v1, 0x7fc00000
+; GCN-NSZ-NEXT:    v_cmp_nlt_f32_e32 vcc, 0, v0
+; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v0, v1, 0, vcc
+; GCN-NSZ-NEXT:    ; return to shader part epilog
 .entry:
   %tmp7 = fdiv afn float 1.000000e+00, %tmp6
   %tmp8 = fmul float 0.000000e+00, %tmp7
@@ -280,6 +343,51 @@ define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -301,6 +409,57 @@ define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_store_use_mul_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_store_use_mul_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -326,6 +485,57 @@ define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, p
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_multi_use_mul_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e64 v2, v4, -v2
+; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_multi_use_mul_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e64 v2, v4, -v2
+; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -347,6 +557,51 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -367,6 +622,51 @@ define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_x_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_x_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -387,6 +687,51 @@ define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_mul_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_fneg_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e64 v2, v0, -v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_fneg_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_f32_e64 v2, v0, -v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -411,6 +756,57 @@ define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr a
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -434,6 +830,61 @@ define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_load_dword s2, s[4:5], 0xf
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mul_f32_e64 v3, -v4, s2
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x3c
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mul_f32_e64 v3, -v4, s2
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -462,6 +913,55 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_max_f32_e32 v2, v3, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_max_f32_e32 v2, v3, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -481,6 +981,10 @@ define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr add
 ; GCN: v_max_f32_e64 v0, -v0, -v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %b)
   %fneg = fneg float %min
   ret float %fneg
@@ -492,6 +996,41 @@ define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_self_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, v2, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_self_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, v2, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -508,6 +1047,10 @@ define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, -v0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_self_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %a)
   %min.fneg = fneg float %min
   ret float %min.fneg
@@ -519,6 +1062,41 @@ define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_posk_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_posk_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -535,6 +1113,10 @@ define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, -4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_posk_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -4.0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float 4.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -546,6 +1128,41 @@ define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_negk_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_negk_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, 4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -562,6 +1179,10 @@ define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, 4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_negk_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, 4.0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float -4.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -574,6 +1195,41 @@ define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MIN]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_0_minnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_min_f32_e32 v2, 0, v3
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_minnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_min_f32_e32 v2, 0, v3
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -591,6 +1247,41 @@ define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg0_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg0_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, 0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -614,6 +1305,42 @@ define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, pt
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 0xbe22f983, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v2
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -636,6 +1363,41 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 0x3e22f983, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, 0.15915494, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -660,6 +1422,44 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, p
 
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_ushort v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -v0
+; SI-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; SI-NEXT:    v_max_f32_e32 v0, 0xbe230000, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_max_f16_e32 v2, v3, v3
+; VI-NEXT:    v_min_f16_e32 v2, 0.15915494, v2
+; VI-NEXT:    v_xor_b32_e32 v2, 0x8000, v2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -683,6 +1483,43 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_ushort v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -v0
+; SI-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; SI-NEXT:    v_max_f32_e32 v0, 0x3e230000, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_max_f16_e64 v2, -v3, -v3
+; VI-NEXT:    v_max_f16_e32 v2, 0.15915494, v2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -707,6 +1544,44 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, p
 
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RESULT_LO]]:[[RESULT_HI]]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b32 s2, 0x6dc9c882
+; SI-NEXT:    s_mov_b32 s3, 0xbfc45f30
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; SI-NEXT:    v_max_f64 v[0:1], v[0:1], s[2:3]
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; VI-NEXT:    v_min_f64 v[0:1], v[0:1], 0.15915494309189532
+; VI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -731,6 +1606,43 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b32 s2, 0x6dc9c882
+; SI-NEXT:    s_mov_b32 s3, 0x3fc45f30
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; SI-NEXT:    v_max_f64 v[0:1], v[0:1], s[2:3]
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; VI-NEXT:    v_max_f64 v[0:1], v[0:1], 0.15915494309189532
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -747,6 +1659,10 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, p
 ; GCN: v_max_f32_e64 v0, -v0, 0{{$}}
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_neg0_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, 0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float -0.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -760,6 +1676,55 @@ define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT:    v_min_f32_e32 v2, 0, v2
+; SI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT:    v_min_f32_e32 v2, 0, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -789,6 +1754,55 @@ define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v4
+; SI-NEXT:    v_max_f32_e32 v2, 0xbe22f983, v2
+; SI-NEXT:    v_mul_f32_e32 v2, v2, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -810,6 +1824,11 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_0_minnum_foldable_use_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e32 v0, 0, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float 0.0, float %a)
   %fneg = fneg float %min
   %mul = fmul float %fneg, %b
@@ -828,6 +1847,61 @@ define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_max_f32_e32 v2, v3, v2
+; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_max_f32_e32 v2, v3, v2
+; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -850,6 +1924,11 @@ define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace
 ; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
 ; GCN-NEXT: ; return
 define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_minnum_multi_use_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    v_mul_f32_e32 v1, -4.0, v0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %b)
   %fneg = fneg float %min
   %use1 = fmul float %min, 4.0
@@ -871,6 +1950,55 @@ define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_min_f32_e32 v2, v3, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_min_f32_e32 v2, v3, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -890,6 +2018,10 @@ define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr add
 ; GCN: v_min_f32_e64 v0, -v0, -v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %b)
   %fneg = fneg float %max
   ret float %fneg
@@ -901,6 +2033,41 @@ define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_self_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, v2, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_self_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, v2, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -917,6 +2084,10 @@ define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, -v0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_self_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %a)
   %max.fneg = fneg float %max
   ret float %max.fneg
@@ -928,6 +2099,41 @@ define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_posk_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_posk_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -944,6 +2150,10 @@ define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, -4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_posk_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -4.0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float 4.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -955,6 +2165,41 @@ define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_negk_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, 4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_negk_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, 4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -971,6 +2216,10 @@ define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, 4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_negk_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, 4.0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float -4.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -983,6 +2232,41 @@ define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MAX]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_0_maxnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_max_f32_e32 v2, 0, v3
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_maxnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_max_f32_e32 v2, 0, v3
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1000,6 +2284,41 @@ define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, 0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, 0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1016,6 +2335,10 @@ define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, 0{{$}}
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_neg0_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, 0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float -0.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -1029,6 +2352,55 @@ define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT:    v_max_f32_e32 v2, 0, v2
+; SI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT:    v_max_f32_e32 v2, 0, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1050,6 +2422,11 @@ define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_0_maxnum_foldable_use_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e32 v0, 0, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float 0.0, float %a)
   %fneg = fneg float %max
   %mul = fmul float %fneg, %b
@@ -1068,6 +2445,61 @@ define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_min_f32_e32 v2, v3, v2
+; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_min_f32_e32 v2, v3, v2
+; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1090,6 +2522,11 @@ define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace
 ; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
 ; GCN-NEXT: ; return
 define amdgpu_ps <2 x float> @v_fneg_maxnum_multi_use_maxnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    v_mul_f32_e32 v1, -4.0, v0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %b)
   %fneg = fneg float %max
   %use1 = fmul float %max, 4.0
@@ -1139,6 +2576,65 @@ define amdgpu_kernel void @v_fneg_fma_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_fma_store_use_fma_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: v_fneg_fma_store_use_fma_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_fma_f32 v2, v6, v2, v3
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fma_store_use_fma_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_fma_f32 v2, v6, v2, v3
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1494,6 +2990,41 @@ define amdgpu_kernel void @v_fneg_fmad_multi_use_fmad_f32(ptr addrspace(1) %out,
 ; GCN: v_cvt_f64_f32_e64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e64 v[0:1], -v1
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e64 v[0:1], -v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1510,6 +3041,41 @@ define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, pt
 ; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]]
 ; GCN: {{buffer|flat}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1529,6 +3095,49 @@ define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %ou
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FNEG_A]]
 define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v4, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v4
+; SI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v4, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v4
+; VI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1549,6 +3158,51 @@ define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrsp
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[CVT_LO]]:[[CVT_HI]]]
 define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; VI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
+; VI-NEXT:    v_mov_b32_e32 v4, v0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1569,6 +3223,52 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrsp
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    v_mul_f64 v[0:1], v[0:1], 4.0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v6, s1
+; VI-NEXT:    v_add_u32_e32 v5, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[1:2], v1
+; VI-NEXT:    v_mul_f64 v[3:4], v[1:2], 4.0
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dwordx2 v[5:6], v[1:2]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[5:6], v[3:4]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1585,6 +3285,49 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(p
 ; FIXME: Source modifiers not folded for f16->f32
 ; GCN-LABEL: {{^}}v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
 define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v4, -v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1599,6 +3342,51 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrsp
 
 ; GCN-LABEL: {{^}}v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
 define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT:    v_cvt_f32_f16_e64 v4, -v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, 4.0, v3
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT:    v_mul_f32_e32 v3, 4.0, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1621,6 +3409,41 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(p
 ; GCN: v_cvt_f32_f64_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f64_e64 v2, -v[1:2]
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f64_e64 v2, -v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1637,6 +3460,41 @@ define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr
 ; GCN: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1656,6 +3514,49 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[A_LO]]:[[NEG_A_HI]]]
 define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, s1
+; SI-NEXT:    v_add_i32_e32 v3, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; SI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[3:4], v5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[0:1], v[1:2]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, s1
+; VI-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; VI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[3:4], v5
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[1:2]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1677,6 +3578,51 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspa
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
 define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, double %c) #0 {
+; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, s1
+; SI-NEXT:    v_add_i32_e32 v3, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; SI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
+; SI-NEXT:    v_mul_f64 v[0:1], -v[1:2], s[4:5]
+; SI-NEXT:    flat_store_dword v[3:4], v5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_mul_f64 v[3:4], -v[1:2], s[4:5]
+; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[3:4]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1696,6 +3642,41 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspa
 ; GCN: v_cvt_f16_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f16_f32_e64 v3, -v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f16_f32_e64 v3, -v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1712,6 +3693,41 @@ define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr
 ; GCN: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1731,6 +3747,49 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[CVT]]
 define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1750,6 +3809,49 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspa
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v2, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v2, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1770,6 +3872,51 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspa
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
 define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dword s4, s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v2, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; SI-NEXT:    v_mul_f32_e64 v2, -v2, s4
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v2, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, s4
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1793,6 +3940,39 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspa
 ; GCN: v_rcp_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_rcp_f32_e64 v3, -v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_rcp_f32_e64 v3, -v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1809,6 +3989,39 @@ define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_rcp_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_rcp_f32_e32 v3, v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1828,6 +4041,47 @@ define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_store_use_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rcp_f32_e32 v4, v3
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_store_use_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rcp_f32_e32 v4, v3
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1848,6 +4102,49 @@ define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out,
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dword s4, s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rcp_f32_e32 v4, v3
+; SI-NEXT:    v_mul_f32_e64 v2, -v3, s4
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rcp_f32_e32 v4, v3
+; VI-NEXT:    v_mul_f32_e64 v2, -v3, s4
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1872,6 +4169,51 @@ define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out,
 ; GCN: v_mul_legacy_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_legacy_f32_e64 v2, v5, -v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_legacy_f32_e64 v2, v5, -v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1895,6 +4237,57 @@ define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addr
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1919,6 +4312,57 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrsp
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_legacy_f32_e64 v2, v4, -v2
+; SI-NEXT:    v_mul_legacy_f32_e64 v3, -v2, 4.0
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_legacy_f32_e64 v2, v4, -v2
+; VI-NEXT:    v_mul_legacy_f32_e64 v3, -v2, 4.0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1940,6 +4384,51 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrsp
 ; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1960,6 +4449,51 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1980,6 +4514,51 @@ define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e64 v2, v0, -v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e64 v2, v0, -v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2003,6 +4582,57 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2026,6 +4656,61 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_load_dword s2, s[4:5], 0xf
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e64 v3, -v4, s2
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x3c
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e64 v3, -v4, s2
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2053,6 +4738,43 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(
 ; GCN: v_sin_f32_e32 [[RESULT:v[0-9]+]], [[FRACT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_sin_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v0, 0xbe22f983, v0
+; SI-NEXT:    v_fract_f32_e32 v0, v0
+; SI-NEXT:    v_sin_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_sin_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v0, 0xbe22f983, v0
+; VI-NEXT:    v_fract_f32_e32 v0, v0
+; VI-NEXT:    v_sin_f32_e32 v3, v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2069,6 +4791,39 @@ define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN: v_sin_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_amdgcn_sin_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_sin_f32_e64 v3, -v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_amdgcn_sin_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_sin_f32_e64 v3, -v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2089,6 +4844,39 @@ define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_trunc_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_trunc_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_trunc_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_trunc_f32_e64 v2, -v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_trunc_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_trunc_f32_e64 v2, -v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2136,6 +4924,39 @@ define amdgpu_kernel void @v_fneg_round_f32(ptr addrspace(1) %out, ptr addrspace
 ; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rint_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rndne_f32_e64 v2, -v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rint_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rndne_f32_e64 v2, -v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2156,6 +4977,39 @@ define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(
 ; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_nearbyint_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rndne_f32_e64 v2, -v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_nearbyint_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rndne_f32_e64 v2, -v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2176,6 +5030,39 @@ define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], -1.0, [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_canonicalize_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_canonicalize_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2198,6 +5085,63 @@ define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr ad
 ; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 ; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_interp_p1_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_mov_b32 m0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT:    v_interp_p1_f32 v3, v2, attr0.x
+; SI-NEXT:    v_interp_p1_f32 v2, v2, attr0.y
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_interp_p1_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_mov_b32 m0, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT:    v_interp_p1_f32_e32 v3, v2, attr0.x
+; VI-NEXT:    v_interp_p1_f32_e32 v2, v2, attr0.y
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2221,6 +5165,67 @@ define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 ; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_interp_p2_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v6, 4.0
+; SI-NEXT:    s_mov_b32 m0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_mov_b32_e32 v3, 4.0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT:    v_interp_p2_f32 v6, v2, attr0.x
+; SI-NEXT:    v_interp_p2_f32 v3, v2, attr0.y
+; SI-NEXT:    flat_store_dword v[0:1], v6
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_interp_p2_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    v_mov_b32_e32 v6, 4.0
+; VI-NEXT:    s_mov_b32 m0, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_mov_b32_e32 v3, 4.0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT:    v_interp_p2_f32_e32 v6, v2, attr0.x
+; VI-NEXT:    v_interp_p2_f32_e32 v3, v2, attr0.y
+; VI-NEXT:    flat_store_dword v[0:1], v6
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2256,6 +5261,85 @@ define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 
 define amdgpu_kernel void @v_fneg_copytoreg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_copytoreg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT:    s_load_dword s0, s[4:5], 0x11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v7, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s8, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    v_mul_f32_e32 v3, v7, v3
+; SI-NEXT:    s_cbranch_scc0 .LBB105_2
+; SI-NEXT:  ; %bb.1: ; %endif
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+; SI-NEXT:  .LBB105_2: ; %if
+; SI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v3
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_copytoreg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT:    s_load_dword s0, s[4:5], 0x44
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v7, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s8, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    v_mul_f32_e32 v3, v7, v3
+; VI-NEXT:    s_cbranch_scc0 .LBB105_2
+; VI-NEXT:  ; %bb.1: ; %endif
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+; VI-NEXT:  .LBB105_2: ; %if
+; VI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v3
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2292,6 +5376,67 @@ endif:
 ; GCN: ; use [[MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_inlineasm_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v6, -v2
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ; use v2
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inlineasm_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v6, -v2
+; VI-NEXT:    ;;#ASMSTART
+; VI-NEXT:    ; use v2
+; VI-NEXT:    ;;#ASMEND
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2321,6 +5466,69 @@ define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: ; use [[NEG]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, v6, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ; use v3
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, v6, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    ;;#ASMSTART
+; VI-NEXT:    ; use v3
+; VI-NEXT:    ;;#ASMEND
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2356,6 +5564,65 @@ define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_2_vop3_users_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_fma_f32 v2, -v6, v2, v3
+; SI-NEXT:    v_fma_f32 v3, -v6, v3, 2.0
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multiuse_fneg_2_vop3_users_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_fma_f32 v2, -v6, v2, v3
+; VI-NEXT:    v_fma_f32 v3, -v6, v3, 2.0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2390,6 +5657,65 @@ define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out,
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_2_vop2_users_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e64 v2, -v6, v2
+; SI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multiuse_fneg_2_vop2_users_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e64 v2, -v6, v2
+; VI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2423,6 +5749,65 @@ define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out,
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_fma_f32 v2, -v6, v2, 2.0
+; SI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_fma_f32 v2, -v6, v2, 2.0
+; VI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2501,6 +5886,79 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(ptr addrspace(1)
 ; GCN-NEXT: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: free_fold_src_code_size_cost_use_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_add_i32_e32 v6, vcc, s0, v6
+; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    v_mul_f64 v[4:5], -v[0:1], v[4:5]
+; SI-NEXT:    v_mul_f64 v[0:1], -v[0:1], v[6:7]
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: free_fold_src_code_size_cost_use_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s0, v6
+; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
+; VI-NEXT:    v_mul_f64 v[2:3], -v[0:1], v[4:5]
+; VI-NEXT:    v_mul_f64 v[0:1], -v[0:1], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v5, s9
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2534,6 +5992,73 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1)
 ; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
 define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: one_use_cost_to_fold_into_src_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v8, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v7, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_trunc_f32_e32 v0, v8
+; SI-NEXT:    v_fma_f32 v2, -v0, v2, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: one_use_cost_to_fold_into_src_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v8, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v7, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_trunc_f32_e32 v0, v8
+; VI-NEXT:    v_fma_f32 v2, -v0, v2, v3
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2564,6 +6089,79 @@ define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %o
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: multi_use_cost_to_fold_into_src:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_add_i32_e32 v6, vcc, s0, v6
+; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT:    flat_load_dword v8, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v4, v[6:7] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_trunc_f32_e32 v5, v8
+; SI-NEXT:    v_fma_f32 v2, -v5, v2, v3
+; SI-NEXT:    v_mul_f32_e32 v3, v5, v4
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multi_use_cost_to_fold_into_src:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s0, v6
+; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NEXT:    flat_load_dword v8, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[6:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_trunc_f32_e32 v5, v8
+; VI-NEXT:    v_fma_f32 v2, -v5, v2, v3
+; VI-NEXT:    v_mul_f32_e32 v3, v5, v4
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2599,6 +6197,17 @@ define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out
 ; GCN-DAG: v_mul_f32_e32 v1, [[SUB1]], v5
 ; GCN: s_setpc_b64
 define <2 x float> @fneg_fma_fneg_dagcombine_loop(<2 x float> %arg, <2 x float> %arg1, <2 x float> %arg2) #0 {
+; GCN-LABEL: fneg_fma_fneg_dagcombine_loop:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, 1
+; GCN-NEXT:    v_fma_f32 v3, v3, -v5, s4
+; GCN-NEXT:    v_fma_f32 v2, v2, -v4, s4
+; GCN-NEXT:    v_sub_f32_e32 v1, v3, v1
+; GCN-NEXT:    v_sub_f32_e32 v0, v2, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v4
+; GCN-NEXT:    v_mul_f32_e32 v1, v1, v5
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb:
   %i3 = call fast <2 x float> @llvm.fma.v2f32(<2 x float> %arg1, <2 x float> %arg2, <2 x float> zeroinitializer)
   %i4 = fadd fast <2 x float> %i3, %arg
@@ -2612,6 +6221,11 @@ bb:
 ; GCN: s_waitcnt
 ; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
 define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
+; GCN-LABEL: nnan_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul float %x, -1.0
   %add = fmul nnan float %mul, %y
   ret float %add
@@ -2623,6 +6237,11 @@ define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN: v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT: s_setpc_b64
 define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
+; GCN-LABEL: denormal_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul nnan float %x, -1.0
   %add = fmul float %mul, %y
   ret float %add
@@ -2635,6 +6254,12 @@ define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
 ; GCN-NEXT: v_mul_f32_e32 v0, [[TMP]], v1
 ; GCN-NEXT: s_setpc_b64
 define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
+; GCN-LABEL: denorm_snan_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e64 v0, v0, -v0
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %canonical = fmul float %x, %x
   %mul = fmul float %canonical, -1.0
   %add = fmul float %mul, %y
@@ -2646,6 +6271,12 @@ define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
 ; GCN-NEXT: v_mul_f32_e32 [[TMP:v[0-9]+]], 1.0, v0
 ; GCN-NEXT: v_mul_f32_e64 v0, -[[TMP]], v1
 define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
+; GCN-LABEL: flush_snan_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %quiet = call float @llvm.canonicalize.f32(float %x)
   %mul = fmul float %quiet, -1.0
   %add = fmul float %mul, %y
@@ -2658,6 +6289,13 @@ define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN-NEXT: v_sub_f32_e32 v0, v3, v0
 ; GCN-NEXT: s_setpc_b64
 define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z) {
+; GCN-LABEL: fadd_select_fneg_fneg_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    v_sub_f32_e32 v0, v3, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg float %x
   %neg.y  = fneg float %y
@@ -2673,6 +6311,14 @@ define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z)
 ; GCN-NEXT: v_add_f64 v[0:1], v[5:6], -v[1:2]
 ; GCN-NEXT: s_setpc_b64
 define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double %z) {
+; GCN-LABEL: fadd_select_fneg_fneg_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; GCN-NEXT:    v_add_f64 v[0:1], v[5:6], -v[1:2]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg double %x
   %neg.y  = fneg double %y
@@ -2698,6 +6344,27 @@ define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double
 ; VI-NEXT: v_sub_f16_e32 v0, v3, v0
 ; VI-NEXT: s_setpc_b64
 define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
+; SI-LABEL: fadd_select_fneg_fneg_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; SI-NEXT:    v_sub_f32_e32 v0, v3, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: fadd_select_fneg_fneg_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT:    v_sub_f16_e32 v0, v3, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg half %x
   %neg.y = fneg half %y
@@ -2730,6 +6397,39 @@ define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
 ; VI-NEXT: v_sub_f16_e32 v0, v3, v0
 ; VI-NEXT: v_or_b32_e32 v0, v0, v1
 define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x half> %y, <2 x half> %z) {
+; SI-LABEL: fadd_select_fneg_fneg_v2f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v6
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v2, v3, v2
+; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v5
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_sub_f32_e32 v0, v4, v0
+; SI-NEXT:    v_sub_f32_e32 v1, v3, v1
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: fadd_select_fneg_fneg_v2f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT:    v_sub_f16_sdwa v1, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT:    v_sub_f16_e32 v0, v3, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg <2 x half> %x
   %neg.y = fneg <2 x half> %y
@@ -2746,6 +6446,13 @@ define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x ha
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT: s_setpc_b64
 define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
+; GCN-LABEL: v_fneg_select_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cond = icmp eq i32 %arg0, 0
   %select = select i1 %cond, float %a, float %b
   %fneg = fneg float %select
@@ -2769,6 +6476,15 @@ define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
 
 ; GCN-NEXT: s_setpc_b64
 define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
+; GCN-LABEL: v_fneg_select_2_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_add_f32_e32 v1, 2.0, v1
+; GCN-NEXT:    v_add_f32_e32 v2, 4.0, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cond = icmp eq i32 %arg0, 0
   %add.0 = fadd float %a, 2.0
   %add.1 = fadd float %b, 4.0
@@ -2782,6 +6498,43 @@ define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
 ; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
+; SI-LABEL: v_fneg_posk_select_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v4, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s0, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, 4.0, v4, vcc
+; SI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; SI-NEXT:    flat_store_dword v[1:2], v0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_posk_select_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v4, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, 4.0, v4, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2799,6 +6552,43 @@ define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr add
 ; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, -4.0, v{{[0-9]+}}, vcc
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 define amdgpu_kernel void @v_fneg_negk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
+; SI-LABEL: v_fneg_negk_select_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v4, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s0, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, -4.0, v4, vcc
+; SI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; SI-NEXT:    flat_store_dword v[1:2], v0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_negk_select_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v4, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, -4.0, v4, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2839,3 +6629,5 @@ attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign"
 attributes #1 = { nounwind readnone }
 attributes #2 = { nounwind "unsafe-fp-math"="true" }
 attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GCN-SAFE: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index f78b302761ed289..203c6223e762f20 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -2207,26 +2207,26 @@ define float @v_fneg_round_f32(float %a) #0 {
 ; GCN-SAFE-LABEL: v_fneg_round_f32:
 ; GCN-SAFE:       ; %bb.0:
 ; GCN-SAFE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-SAFE-NEXT:    v_trunc_f32_e32 v1, v0
+; GCN-SAFE-NEXT:    v_sub_f32_e32 v2, v0, v1
+; GCN-SAFE-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, 0.5
+; GCN-SAFE-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[4:5]
 ; GCN-SAFE-NEXT:    s_brev_b32 s4, -2
-; GCN-SAFE-NEXT:    v_trunc_f32_e32 v2, v0
-; GCN-SAFE-NEXT:    v_bfi_b32 v1, s4, 1.0, v0
-; GCN-SAFE-NEXT:    v_sub_f32_e32 v0, v0, v2
-; GCN-SAFE-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; GCN-SAFE-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-SAFE-NEXT:    v_add_f32_e32 v0, v2, v0
+; GCN-SAFE-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; GCN-SAFE-NEXT:    v_add_f32_e32 v0, v1, v0
 ; GCN-SAFE-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-SAFE-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GCN-NSZ-LABEL: v_fneg_round_f32:
 ; GCN-NSZ:       ; %bb.0:
 ; GCN-NSZ-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NSZ-NEXT:    v_trunc_f32_e32 v1, v0
+; GCN-NSZ-NEXT:    v_sub_f32_e32 v2, v0, v1
+; GCN-NSZ-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, 0.5
+; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[4:5]
 ; GCN-NSZ-NEXT:    s_brev_b32 s4, -2
-; GCN-NSZ-NEXT:    v_trunc_f32_e32 v2, v0
-; GCN-NSZ-NEXT:    v_bfi_b32 v1, s4, 1.0, v0
-; GCN-NSZ-NEXT:    v_sub_f32_e32 v0, v0, v2
-; GCN-NSZ-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; GCN-NSZ-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NSZ-NEXT:    v_sub_f32_e64 v0, -v2, v0
+; GCN-NSZ-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; GCN-NSZ-NEXT:    v_sub_f32_e64 v0, -v1, v0
 ; GCN-NSZ-NEXT:    s_setpc_b64 s[30:31]
   %round = call float @llvm.round.f32(float %a)
   %fneg = fneg float %round
diff --git a/llvm/test/CodeGen/AMDGPU/known-never-snan.ll b/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
index 5ee6eda85f0ad1f..1ad4ce5ae52fff7 100644
--- a/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
+++ b/llvm/test/CodeGen/AMDGPU/known-never-snan.ll
@@ -455,13 +455,13 @@ define float @v_test_known_not_snan_round_input_fmed3_r_i_i_f32(float %a) #0 {
 ; GCN-LABEL: v_test_known_not_snan_round_input_fmed3_r_i_i_f32:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_trunc_f32_e32 v1, v0
+; GCN-NEXT:    v_sub_f32_e32 v2, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, 0.5
+; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[4:5]
 ; GCN-NEXT:    s_brev_b32 s4, -2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v0
-; GCN-NEXT:    v_bfi_b32 v1, s4, 1.0, v0
-; GCN-NEXT:    v_sub_f32_e32 v0, v0, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT:    v_add_f32_e32 v0, v2, v0
+; GCN-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; GCN-NEXT:    v_add_f32_e32 v0, v1, v0
 ; GCN-NEXT:    v_med3_f32 v0, v0, 2.0, 4.0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %known.not.snan = call float @llvm.round.f32(float %a)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index 6b013175ff61b3f..2acd9c0017b099b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -9,30 +9,32 @@ define amdgpu_kernel void @round_f64(ptr addrspace(1) %out, double %x) #0 {
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_mov_b32 s5, 0xfffff
 ; SI-NEXT:    s_mov_b32 s4, s6
-; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bfe_u32 s8, s3, 0xb0014
-; SI-NEXT:    s_addk_i32 s8, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[4:5], s8
+; SI-NEXT:    s_bfe_u32 s7, s3, 0xb0014
+; SI-NEXT:    s_addk_i32 s7, 0xfc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[4:5], s7
+; SI-NEXT:    s_and_b32 s8, s3, 0x80000000
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[2:3], s[4:5]
-; SI-NEXT:    s_and_b32 s10, s3, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s8, 0
+; SI-NEXT:    s_cmp_lt_i32 s7, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s10, s5
-; SI-NEXT:    s_cmp_gt_i32 s8, 51
+; SI-NEXT:    s_cselect_b32 s5, s8, s5
+; SI-NEXT:    s_cmp_gt_i32 s7, 51
 ; SI-NEXT:    s_cselect_b32 s8, s2, s4
 ; SI-NEXT:    s_cselect_b32 s9, s3, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
 ; SI-NEXT:    v_add_f64 v[0:1], s[2:3], -v[0:1]
 ; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    v_cmp_ge_f64_e64 s[2:3], |v[0:1]|, 0.5
-; SI-NEXT:    s_or_b32 s0, s10, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
-; SI-NEXT:    s_cselect_b32 s0, s0, 0
+; SI-NEXT:    v_cmp_ge_f64_e64 s[10:11], |v[0:1]|, 0.5
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    s_and_b64 s[10:11], s[10:11], exec
+; SI-NEXT:    s_cselect_b32 s0, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_bfi_b32 v1, s2, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    v_add_f64 v[0:1], s[8:9], v[0:1]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s5, s1
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -40,19 +42,21 @@ define amdgpu_kernel void @round_f64(ptr addrspace(1) %out, double %x) #0 {
 ; CI-LABEL: round_f64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s8, 0
+; CI-NEXT:    s_brev_b32 s5, -2
 ; CI-NEXT:    s_mov_b32 s7, 0xf000
 ; CI-NEXT:    s_mov_b32 s6, -1
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[2:3]
 ; CI-NEXT:    s_mov_b32 s4, s0
 ; CI-NEXT:    v_add_f64 v[2:3], s[2:3], -v[0:1]
-; CI-NEXT:    s_and_b32 s0, s3, 0x80000000
-; CI-NEXT:    v_cmp_ge_f64_e64 s[2:3], |v[2:3]|, 0.5
-; CI-NEXT:    s_or_b32 s0, s0, 0x3ff00000
-; CI-NEXT:    s_and_b64 s[2:3], s[2:3], exec
-; CI-NEXT:    s_cselect_b32 s9, s0, 0
-; CI-NEXT:    v_add_f64 v[0:1], v[0:1], s[8:9]
+; CI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[2:3]|, 0.5
+; CI-NEXT:    v_mov_b32_e32 v2, s3
+; CI-NEXT:    s_and_b64 s[2:3], s[8:9], exec
+; CI-NEXT:    s_cselect_b32 s0, 0x3ff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v3, s0
+; CI-NEXT:    v_bfi_b32 v3, s5, v3, v2
+; CI-NEXT:    v_mov_b32_e32 v2, 0
+; CI-NEXT:    v_add_f64 v[0:1], v[0:1], v[2:3]
 ; CI-NEXT:    s_mov_b32 s5, s1
 ; CI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; CI-NEXT:    s_endpgm
@@ -75,6 +79,7 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_movk_i32 s4, 0xfc01
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s3, 0xfffff
+; SI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_bfe_u32 v4, v3, 20, 11
 ; SI-NEXT:    v_add_i32_e32 v6, vcc, s4, v4
@@ -90,11 +95,12 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    v_cmp_lt_i32_e32 vcc, 51, v6
 ; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v3, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v2, vcc
-; SI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[4:5]
-; SI-NEXT:    v_or_b32_e32 v6, 0x3ff00000, v7
-; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
+; SI-NEXT:    v_add_f64 v[6:7], v[2:3], -v[4:5]
+; SI-NEXT:    s_brev_b32 s2, -2
+; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
+; SI-NEXT:    v_cndmask_b32_e32 v2, 0, v8, vcc
+; SI-NEXT:    v_bfi_b32 v3, s2, v2, v3
 ; SI-NEXT:    v_mov_b32_e32 v2, v1
-; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
 ; SI-NEXT:    v_add_f64 v[2:3], v[4:5], v[2:3]
 ; SI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; SI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
@@ -110,16 +116,17 @@ define amdgpu_kernel void @v_round_f64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    s_mov_b64 s[4:5], s[2:3]
 ; CI-NEXT:    buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
-; CI-NEXT:    s_mov_b64 s[2:3], s[6:7]
+; CI-NEXT:    v_mov_b32_e32 v8, 0x3ff00000
+; CI-NEXT:    s_brev_b32 s2, -2
 ; CI-NEXT:    s_waitcnt vmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[4:5], v[2:3]
-; CI-NEXT:    v_and_b32_e32 v6, 0x80000000, v3
-; CI-NEXT:    v_add_f64 v[2:3], v[2:3], -v[4:5]
-; CI-NEXT:    v_or_b32_e32 v6, 0x3ff00000, v6
-; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
+; CI-NEXT:    v_add_f64 v[6:7], v[2:3], -v[4:5]
+; CI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
+; CI-NEXT:    v_cndmask_b32_e32 v2, 0, v8, vcc
+; CI-NEXT:    v_bfi_b32 v3, s2, v2, v3
 ; CI-NEXT:    v_mov_b32_e32 v2, v1
-; CI-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
 ; CI-NEXT:    v_add_f64 v[2:3], v[4:5], v[2:3]
+; CI-NEXT:    s_mov_b64 s[2:3], s[6:7]
 ; CI-NEXT:    buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64
 ; CI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -154,12 +161,12 @@ define amdgpu_kernel void @round_v2f64(ptr addrspace(1) %out, <2 x double> %in)
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], -v[0:1]
-; SI-NEXT:    s_or_b32 s3, s12, 0x3ff00000
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[0:1]|, 0.5
-; SI-NEXT:    v_mov_b32_e32 v0, 0
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_mov_b32_e32 v4, s5
+; SI-NEXT:    v_cmp_ge_f64_e64 s[12:13], |v[0:1]|, 0.5
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_and_b64 s[12:13], s[12:13], exec
+; SI-NEXT:    s_cselect_b32 s3, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s3
 ; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
 ; SI-NEXT:    s_addk_i32 s3, 0xfc01
 ; SI-NEXT:    s_lshr_b64 s[6:7], s[8:9], s3
@@ -173,13 +180,16 @@ define amdgpu_kernel void @round_v2f64(ptr addrspace(1) %out, <2 x double> %in)
 ; SI-NEXT:    s_cselect_b32 s7, s5, s7
 ; SI-NEXT:    v_mov_b32_e32 v2, s6
 ; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_add_f64 v[4:5], s[4:5], -v[2:3]
-; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[4:5]|, 0.5
+; SI-NEXT:    v_add_f64 v[2:3], s[4:5], -v[2:3]
+; SI-NEXT:    s_brev_b32 s12, -2
+; SI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[2:3]|, 0.5
+; SI-NEXT:    v_bfi_b32 v1, s12, v0, v1
+; SI-NEXT:    s_and_b64 s[8:9], s[8:9], exec
+; SI-NEXT:    v_mov_b32_e32 v0, 0
+; SI-NEXT:    s_cselect_b32 s3, 0x3ff00000, 0
 ; SI-NEXT:    v_add_f64 v[2:3], s[10:11], v[0:1]
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_bfi_b32 v1, s12, v1, v4
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[0:1]
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -189,25 +199,28 @@ define amdgpu_kernel void @round_v2f64(ptr addrspace(1) %out, <2 x double> %in)
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; CI-NEXT:    s_mov_b32 s8, 0
+; CI-NEXT:    s_brev_b32 s2, -2
+; CI-NEXT:    v_mov_b32_e32 v0, 0
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
-; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[6:7]
-; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[4:5]
-; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
-; CI-NEXT:    s_and_b32 s2, s7, 0x80000000
-; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[6:7], s[4:5], -v[4:5]
+; CI-NEXT:    v_trunc_f64_e32 v[2:3], s[6:7]
+; CI-NEXT:    v_trunc_f64_e32 v[6:7], s[4:5]
+; CI-NEXT:    v_add_f64 v[4:5], s[6:7], -v[2:3]
+; CI-NEXT:    v_mov_b32_e32 v1, s7
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[4:5]|, 0.5
+; CI-NEXT:    v_add_f64 v[4:5], s[4:5], -v[6:7]
+; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[4:5]|, 0.5
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v8, s4
 ; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; CI-NEXT:    s_cselect_b32 s9, s2, 0
-; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s5, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    s_and_b64 s[4:5], s[6:7], exec
-; CI-NEXT:    v_add_f64 v[2:3], v[0:1], s[8:9]
-; CI-NEXT:    s_cselect_b32 s9, s2, 0
-; CI-NEXT:    v_add_f64 v[0:1], v[4:5], s[8:9]
+; CI-NEXT:    v_bfi_b32 v1, s2, v8, v1
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[2:3], v[2:3], v[0:1]
+; CI-NEXT:    v_mov_b32_e32 v1, s4
+; CI-NEXT:    v_mov_b32_e32 v4, s5
+; CI-NEXT:    v_bfi_b32 v1, s2, v1, v4
+; CI-NEXT:    v_add_f64 v[0:1], v[6:7], v[0:1]
 ; CI-NEXT:    s_mov_b32 s2, -1
 ; CI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; CI-NEXT:    s_endpgm
@@ -223,7 +236,7 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_mov_b32 s13, 0xfffff
 ; SI-NEXT:    s_mov_b32 s12, s2
-; SI-NEXT:    v_mov_b32_e32 v4, 0
+; SI-NEXT:    s_brev_b32 s18, -2
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_bfe_u32 s3, s7, 0xb0014
 ; SI-NEXT:    s_addk_i32 s3, 0xfc01
@@ -239,12 +252,12 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; SI-NEXT:    v_mov_b32_e32 v0, s14
 ; SI-NEXT:    v_mov_b32_e32 v1, s15
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], -v[0:1]
-; SI-NEXT:    s_or_b32 s3, s16, 0x3ff00000
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[0:1]|, 0.5
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    v_mov_b32_e32 v4, 0
+; SI-NEXT:    v_cmp_ge_f64_e64 s[16:17], |v[0:1]|, 0.5
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_and_b64 s[16:17], s[16:17], exec
+; SI-NEXT:    s_cselect_b32 s3, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s3
 ; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
 ; SI-NEXT:    s_addk_i32 s3, 0xfc01
 ; SI-NEXT:    s_lshr_b64 s[6:7], s[12:13], s3
@@ -255,15 +268,16 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; SI-NEXT:    s_cselect_b32 s7, s16, s7
 ; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s6, s4, s6
+; SI-NEXT:    v_bfi_b32 v5, s18, v0, v1
 ; SI-NEXT:    s_cselect_b32 s7, s5, s7
 ; SI-NEXT:    v_mov_b32_e32 v0, s6
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
 ; SI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[0:1]
-; SI-NEXT:    s_or_b32 s3, s16, 0x3ff00000
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[0:1]|, 0.5
 ; SI-NEXT:    v_add_f64 v[2:3], s[14:15], v[4:5]
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    v_cmp_ge_f64_e64 s[16:17], |v[0:1]|, 0.5
+; SI-NEXT:    v_mov_b32_e32 v6, s5
+; SI-NEXT:    s_and_b64 s[14:15], s[16:17], exec
+; SI-NEXT:    s_cselect_b32 s3, 0x3ff00000, 0
 ; SI-NEXT:    v_mov_b32_e32 v5, s3
 ; SI-NEXT:    s_bfe_u32 s3, s11, 0xb0014
 ; SI-NEXT:    s_addk_i32 s3, 0xfc01
@@ -278,13 +292,13 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; SI-NEXT:    s_cselect_b32 s5, s11, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_add_f64 v[6:7], s[10:11], -v[0:1]
+; SI-NEXT:    v_add_f64 v[0:1], s[10:11], -v[0:1]
+; SI-NEXT:    v_bfi_b32 v5, s18, v5, v6
+; SI-NEXT:    v_cmp_ge_f64_e64 s[14:15], |v[0:1]|, 0.5
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[4:5]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s14, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    s_and_b64 s[6:7], s[14:15], exec
+; SI-NEXT:    s_cselect_b32 s3, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v8, s3
 ; SI-NEXT:    s_bfe_u32 s3, s9, 0xb0014
 ; SI-NEXT:    s_addk_i32 s3, 0xfc01
 ; SI-NEXT:    s_lshr_b64 s[6:7], s[12:13], s3
@@ -296,15 +310,19 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; SI-NEXT:    s_cmp_gt_i32 s3, 51
 ; SI-NEXT:    s_cselect_b32 s6, s8, s6
 ; SI-NEXT:    s_cselect_b32 s7, s9, s7
-; SI-NEXT:    v_mov_b32_e32 v6, s6
-; SI-NEXT:    v_mov_b32_e32 v7, s7
-; SI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[6:7]
+; SI-NEXT:    v_mov_b32_e32 v5, s6
+; SI-NEXT:    v_mov_b32_e32 v6, s7
+; SI-NEXT:    v_add_f64 v[6:7], s[8:9], -v[5:6]
+; SI-NEXT:    v_mov_b32_e32 v9, s11
+; SI-NEXT:    v_cmp_ge_f64_e64 s[10:11], |v[6:7]|, 0.5
+; SI-NEXT:    v_bfi_b32 v5, s18, v8, v9
 ; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[4:5]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s10, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
+; SI-NEXT:    s_and_b64 s[4:5], s[10:11], exec
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_cselect_b32 s3, 0x3ff00000, 0
 ; SI-NEXT:    v_mov_b32_e32 v5, s3
+; SI-NEXT:    v_mov_b32_e32 v8, s9
+; SI-NEXT:    v_bfi_b32 v5, s18, v5, v8
 ; SI-NEXT:    v_add_f64 v[4:5], s[6:7], v[4:5]
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -315,42 +333,47 @@ define amdgpu_kernel void @round_v4f64(ptr addrspace(1) %out, <4 x double> %in)
 ; CI-LABEL: round_v4f64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x11
-; CI-NEXT:    s_mov_b32 s12, 0
+; CI-NEXT:    s_brev_b32 s2, -2
+; CI-NEXT:    v_mov_b32_e32 v4, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[6:7]
-; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[4:5]
+; CI-NEXT:    v_mov_b32_e32 v5, s7
 ; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
-; CI-NEXT:    s_and_b32 s2, s7, 0x80000000
+; CI-NEXT:    v_trunc_f64_e32 v[6:7], s[4:5]
 ; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[6:7], s[4:5], -v[4:5]
+; CI-NEXT:    v_add_f64 v[2:3], s[4:5], -v[6:7]
 ; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; CI-NEXT:    s_cselect_b32 s13, s2, 0
-; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s5, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_trunc_f64_e32 v[6:7], s[10:11]
-; CI-NEXT:    s_and_b64 s[4:5], s[6:7], exec
-; CI-NEXT:    v_add_f64 v[2:3], v[0:1], s[12:13]
-; CI-NEXT:    s_cselect_b32 s13, s2, 0
-; CI-NEXT:    v_add_f64 v[8:9], s[10:11], -v[6:7]
-; CI-NEXT:    v_add_f64 v[0:1], v[4:5], s[12:13]
-; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[8:9]
-; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s11, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[4:5]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v8, s4
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
+; CI-NEXT:    v_bfi_b32 v5, s2, v8, v5
+; CI-NEXT:    v_trunc_f64_e32 v[8:9], s[10:11]
+; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; CI-NEXT:    v_add_f64 v[2:3], v[0:1], v[4:5]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[0:1], s[10:11], -v[8:9]
+; CI-NEXT:    v_mov_b32_e32 v5, s4
+; CI-NEXT:    v_mov_b32_e32 v10, s5
+; CI-NEXT:    v_bfi_b32 v5, s2, v5, v10
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[0:1]|, 0.5
+; CI-NEXT:    v_trunc_f64_e32 v[10:11], s[8:9]
+; CI-NEXT:    v_add_f64 v[0:1], v[6:7], v[4:5]
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    s_cselect_b32 s13, s2, 0
-; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s9, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[6:7], s[8:9], -v[10:11]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v5, s4
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[6:7]|, 0.5
+; CI-NEXT:    v_mov_b32_e32 v12, s11
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    v_add_f64 v[6:7], v[6:7], s[12:13]
-; CI-NEXT:    s_cselect_b32 s13, s2, 0
-; CI-NEXT:    v_add_f64 v[4:5], v[4:5], s[12:13]
+; CI-NEXT:    v_bfi_b32 v5, s2, v5, v12
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[6:7], v[8:9], v[4:5]
+; CI-NEXT:    v_mov_b32_e32 v5, s4
+; CI-NEXT:    v_mov_b32_e32 v8, s9
+; CI-NEXT:    v_bfi_b32 v5, s2, v5, v8
+; CI-NEXT:    v_add_f64 v[4:5], v[10:11], v[4:5]
 ; CI-NEXT:    s_mov_b32 s2, -1
 ; CI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; CI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -383,152 +406,161 @@ define amdgpu_kernel void @round_v8f64(ptr addrspace(1) %out, <8 x double> %in)
 ; SI-NEXT:    v_mov_b32_e32 v0, s22
 ; SI-NEXT:    v_mov_b32_e32 v1, s23
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], -v[0:1]
-; SI-NEXT:    s_or_b32 s3, s24, 0x3ff00000
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[0:1]|, 0.5
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s5, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_brev_b32 s3, -2
+; SI-NEXT:    v_cmp_ge_f64_e64 s[24:25], |v[0:1]|, 0.5
+; SI-NEXT:    v_mov_b32_e32 v1, s7
+; SI-NEXT:    s_and_b64 s[24:25], s[24:25], exec
+; SI-NEXT:    s_cselect_b32 s6, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s6
+; SI-NEXT:    s_bfe_u32 s6, s5, 0xb0014
+; SI-NEXT:    s_add_i32 s24, s6, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s24
 ; SI-NEXT:    s_andn2_b64 s[6:7], s[4:5], s[6:7]
-; SI-NEXT:    s_and_b32 s24, s5, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s25, s5, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s24, 0
 ; SI-NEXT:    s_cselect_b32 s6, 0, s6
-; SI-NEXT:    s_cselect_b32 s7, s24, s7
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s7, s25, s7
+; SI-NEXT:    s_cmp_gt_i32 s24, 51
 ; SI-NEXT:    s_cselect_b32 s6, s4, s6
+; SI-NEXT:    v_bfi_b32 v9, s3, v0, v1
 ; SI-NEXT:    s_cselect_b32 s7, s5, s7
 ; SI-NEXT:    v_mov_b32_e32 v0, s6
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
 ; SI-NEXT:    v_add_f64 v[0:1], s[4:5], -v[0:1]
-; SI-NEXT:    s_or_b32 s3, s24, 0x3ff00000
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[0:1]|, 0.5
 ; SI-NEXT:    v_add_f64 v[2:3], s[22:23], v[8:9]
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s11, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s3
+; SI-NEXT:    v_cmp_ge_f64_e64 s[24:25], |v[0:1]|, 0.5
+; SI-NEXT:    v_mov_b32_e32 v5, s5
+; SI-NEXT:    s_and_b64 s[22:23], s[24:25], exec
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    s_bfe_u32 s4, s11, 0xb0014
+; SI-NEXT:    s_add_i32 s22, s4, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s22
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[10:11], s[4:5]
-; SI-NEXT:    s_and_b32 s22, s11, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s23, s11, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s22, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s22, s5
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s5, s23, s5
+; SI-NEXT:    s_cmp_gt_i32 s22, 51
 ; SI-NEXT:    s_cselect_b32 s4, s10, s4
 ; SI-NEXT:    s_cselect_b32 s5, s11, s5
 ; SI-NEXT:    v_mov_b32_e32 v0, s4
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_add_f64 v[4:5], s[10:11], -v[0:1]
+; SI-NEXT:    v_add_f64 v[0:1], s[10:11], -v[0:1]
+; SI-NEXT:    v_bfi_b32 v9, s3, v4, v5
+; SI-NEXT:    v_cmp_ge_f64_e64 s[22:23], |v[0:1]|, 0.5
 ; SI-NEXT:    v_add_f64 v[0:1], s[6:7], v[8:9]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[4:5]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s22, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s9, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_and_b64 s[6:7], s[22:23], exec
+; SI-NEXT:    s_cselect_b32 s6, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v6, s6
+; SI-NEXT:    s_bfe_u32 s6, s9, 0xb0014
+; SI-NEXT:    s_add_i32 s10, s6, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s10
+; SI-NEXT:    v_mov_b32_e32 v7, s11
 ; SI-NEXT:    s_andn2_b64 s[6:7], s[8:9], s[6:7]
-; SI-NEXT:    s_and_b32 s10, s9, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s11, s9, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s10, 0
 ; SI-NEXT:    s_cselect_b32 s6, 0, s6
-; SI-NEXT:    s_cselect_b32 s7, s10, s7
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s7, s11, s7
+; SI-NEXT:    s_cmp_gt_i32 s10, 51
 ; SI-NEXT:    s_cselect_b32 s6, s8, s6
 ; SI-NEXT:    s_cselect_b32 s7, s9, s7
 ; SI-NEXT:    v_mov_b32_e32 v4, s6
 ; SI-NEXT:    v_mov_b32_e32 v5, s7
 ; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[4:5]
+; SI-NEXT:    v_bfi_b32 v9, s3, v6, v7
+; SI-NEXT:    v_cmp_ge_f64_e64 s[10:11], |v[4:5]|, 0.5
 ; SI-NEXT:    v_add_f64 v[6:7], s[4:5], v[8:9]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[4:5]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s10, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s15, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s3
+; SI-NEXT:    s_and_b64 s[4:5], s[10:11], exec
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s4
+; SI-NEXT:    s_bfe_u32 s4, s15, 0xb0014
+; SI-NEXT:    s_add_i32 s8, s4, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s8
+; SI-NEXT:    v_mov_b32_e32 v10, s9
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[14:15], s[4:5]
-; SI-NEXT:    s_and_b32 s8, s15, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s9, s15, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s8, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s8, s5
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s5, s9, s5
+; SI-NEXT:    s_cmp_gt_i32 s8, 51
 ; SI-NEXT:    s_cselect_b32 s4, s14, s4
 ; SI-NEXT:    s_cselect_b32 s5, s15, s5
 ; SI-NEXT:    v_mov_b32_e32 v4, s4
 ; SI-NEXT:    v_mov_b32_e32 v5, s5
-; SI-NEXT:    v_add_f64 v[10:11], s[14:15], -v[4:5]
+; SI-NEXT:    v_add_f64 v[4:5], s[14:15], -v[4:5]
+; SI-NEXT:    v_bfi_b32 v9, s3, v9, v10
+; SI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[4:5]|, 0.5
 ; SI-NEXT:    v_add_f64 v[4:5], s[6:7], v[8:9]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[10:11]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s13, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_and_b64 s[6:7], s[8:9], exec
+; SI-NEXT:    s_cselect_b32 s6, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v12, s6
+; SI-NEXT:    s_bfe_u32 s6, s13, 0xb0014
+; SI-NEXT:    s_add_i32 s8, s6, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s8
 ; SI-NEXT:    s_andn2_b64 s[6:7], s[12:13], s[6:7]
-; SI-NEXT:    s_and_b32 s8, s13, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s9, s13, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s8, 0
 ; SI-NEXT:    s_cselect_b32 s6, 0, s6
-; SI-NEXT:    s_cselect_b32 s7, s8, s7
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s7, s9, s7
+; SI-NEXT:    s_cmp_gt_i32 s8, 51
 ; SI-NEXT:    s_cselect_b32 s7, s13, s7
 ; SI-NEXT:    s_cselect_b32 s6, s12, s6
-; SI-NEXT:    v_mov_b32_e32 v11, s7
-; SI-NEXT:    v_mov_b32_e32 v10, s6
-; SI-NEXT:    v_add_f64 v[10:11], s[12:13], -v[10:11]
+; SI-NEXT:    v_mov_b32_e32 v10, s7
+; SI-NEXT:    v_mov_b32_e32 v9, s6
+; SI-NEXT:    v_add_f64 v[10:11], s[12:13], -v[9:10]
+; SI-NEXT:    v_mov_b32_e32 v13, s15
+; SI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[10:11]|, 0.5
+; SI-NEXT:    v_bfi_b32 v9, s3, v12, v13
 ; SI-NEXT:    v_add_f64 v[12:13], s[4:5], v[8:9]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[10:11]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s19, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s3
+; SI-NEXT:    s_and_b64 s[4:5], s[8:9], exec
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v14, s4
+; SI-NEXT:    s_bfe_u32 s4, s19, 0xb0014
+; SI-NEXT:    s_add_i32 s8, s4, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[4:5], s[20:21], s8
 ; SI-NEXT:    s_andn2_b64 s[4:5], s[18:19], s[4:5]
-; SI-NEXT:    s_and_b32 s8, s19, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s9, s19, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s8, 0
 ; SI-NEXT:    s_cselect_b32 s4, 0, s4
-; SI-NEXT:    s_cselect_b32 s5, s8, s5
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s5, s9, s5
+; SI-NEXT:    s_cmp_gt_i32 s8, 51
 ; SI-NEXT:    s_cselect_b32 s5, s19, s5
 ; SI-NEXT:    s_cselect_b32 s4, s18, s4
-; SI-NEXT:    v_mov_b32_e32 v11, s5
-; SI-NEXT:    v_mov_b32_e32 v10, s4
-; SI-NEXT:    v_add_f64 v[14:15], s[18:19], -v[10:11]
+; SI-NEXT:    v_mov_b32_e32 v10, s5
+; SI-NEXT:    v_mov_b32_e32 v9, s4
+; SI-NEXT:    v_add_f64 v[10:11], s[18:19], -v[9:10]
+; SI-NEXT:    v_mov_b32_e32 v15, s13
+; SI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[10:11]|, 0.5
+; SI-NEXT:    v_bfi_b32 v9, s3, v14, v15
 ; SI-NEXT:    v_add_f64 v[10:11], s[6:7], v[8:9]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[14:15]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
-; SI-NEXT:    s_bfe_u32 s3, s17, 0xb0014
-; SI-NEXT:    s_addk_i32 s3, 0xfc01
-; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s3
+; SI-NEXT:    s_and_b64 s[6:7], s[8:9], exec
+; SI-NEXT:    s_cselect_b32 s6, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s6
+; SI-NEXT:    s_bfe_u32 s6, s17, 0xb0014
+; SI-NEXT:    s_add_i32 s8, s6, 0xfffffc01
+; SI-NEXT:    s_lshr_b64 s[6:7], s[20:21], s8
 ; SI-NEXT:    s_andn2_b64 s[6:7], s[16:17], s[6:7]
-; SI-NEXT:    s_and_b32 s8, s17, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
+; SI-NEXT:    s_and_b32 s9, s17, 0x80000000
+; SI-NEXT:    s_cmp_lt_i32 s8, 0
 ; SI-NEXT:    s_cselect_b32 s6, 0, s6
-; SI-NEXT:    s_cselect_b32 s7, s8, s7
-; SI-NEXT:    s_cmp_gt_i32 s3, 51
+; SI-NEXT:    s_cselect_b32 s7, s9, s7
+; SI-NEXT:    s_cmp_gt_i32 s8, 51
 ; SI-NEXT:    s_cselect_b32 s7, s17, s7
 ; SI-NEXT:    s_cselect_b32 s6, s16, s6
 ; SI-NEXT:    v_mov_b32_e32 v15, s7
 ; SI-NEXT:    v_mov_b32_e32 v14, s6
 ; SI-NEXT:    v_add_f64 v[14:15], s[16:17], -v[14:15]
+; SI-NEXT:    v_mov_b32_e32 v16, s19
+; SI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[14:15]|, 0.5
+; SI-NEXT:    v_bfi_b32 v9, s3, v9, v16
 ; SI-NEXT:    v_add_f64 v[16:17], s[4:5], v[8:9]
-; SI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[14:15]|, 0.5
-; SI-NEXT:    s_or_b32 s3, s8, 0x3ff00000
-; SI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; SI-NEXT:    s_cselect_b32 s3, s3, 0
-; SI-NEXT:    v_mov_b32_e32 v9, s3
+; SI-NEXT:    s_and_b64 s[4:5], s[8:9], exec
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; SI-NEXT:    v_mov_b32_e32 v9, s4
+; SI-NEXT:    v_mov_b32_e32 v14, s17
+; SI-NEXT:    v_bfi_b32 v9, s3, v9, v14
 ; SI-NEXT:    v_add_f64 v[14:15], s[6:7], v[8:9]
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
@@ -541,74 +573,83 @@ define amdgpu_kernel void @round_v8f64(ptr addrspace(1) %out, <8 x double> %in)
 ; CI-LABEL: round_v8f64:
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_load_dwordx16 s[4:19], s[0:1], 0x19
-; CI-NEXT:    s_mov_b32 s20, 0
+; CI-NEXT:    s_brev_b32 s2, -2
+; CI-NEXT:    v_mov_b32_e32 v12, 0
 ; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; CI-NEXT:    s_mov_b32 s3, 0xf000
 ; CI-NEXT:    s_waitcnt lgkmcnt(0)
 ; CI-NEXT:    v_trunc_f64_e32 v[0:1], s[6:7]
 ; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[4:5]
 ; CI-NEXT:    v_add_f64 v[2:3], s[6:7], -v[0:1]
-; CI-NEXT:    s_and_b32 s2, s7, 0x80000000
+; CI-NEXT:    v_mov_b32_e32 v6, s7
 ; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[6:7], s[4:5], -v[4:5]
+; CI-NEXT:    v_add_f64 v[2:3], s[4:5], -v[4:5]
+; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[2:3]|, 0.5
+; CI-NEXT:    v_mov_b32_e32 v7, s4
+; CI-NEXT:    v_bfi_b32 v13, s2, v7, v6
 ; CI-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    v_cmp_ge_f64_e64 s[6:7], |v[6:7]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s5, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
 ; CI-NEXT:    v_trunc_f64_e32 v[6:7], s[10:11]
-; CI-NEXT:    s_and_b64 s[4:5], s[6:7], exec
-; CI-NEXT:    v_add_f64 v[2:3], v[0:1], s[20:21]
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    v_add_f64 v[8:9], s[10:11], -v[6:7]
-; CI-NEXT:    v_add_f64 v[0:1], v[4:5], s[20:21]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[2:3], v[0:1], v[12:13]
+; CI-NEXT:    v_mov_b32_e32 v8, s4
+; CI-NEXT:    v_mov_b32_e32 v9, s5
+; CI-NEXT:    v_add_f64 v[0:1], s[10:11], -v[6:7]
+; CI-NEXT:    v_bfi_b32 v13, s2, v8, v9
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[0:1]|, 0.5
+; CI-NEXT:    v_add_f64 v[0:1], v[4:5], v[12:13]
 ; CI-NEXT:    v_trunc_f64_e32 v[4:5], s[8:9]
-; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s11, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[4:5]
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
+; CI-NEXT:    v_add_f64 v[8:9], s[8:9], -v[4:5]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v10, s4
 ; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
 ; CI-NEXT:    v_trunc_f64_e32 v[8:9], s[14:15]
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    s_and_b32 s2, s9, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[10:11], s[14:15], -v[8:9]
+; CI-NEXT:    v_mov_b32_e32 v11, s11
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    v_trunc_f64_e32 v[12:13], s[12:13]
-; CI-NEXT:    v_add_f64 v[6:7], v[6:7], s[20:21]
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
+; CI-NEXT:    v_bfi_b32 v13, s2, v10, v11
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[10:11], s[14:15], -v[8:9]
+; CI-NEXT:    v_add_f64 v[6:7], v[6:7], v[12:13]
+; CI-NEXT:    v_mov_b32_e32 v13, s4
+; CI-NEXT:    v_mov_b32_e32 v14, s9
+; CI-NEXT:    v_bfi_b32 v13, s2, v13, v14
 ; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[10:11]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s15, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[14:15], s[12:13], -v[12:13]
+; CI-NEXT:    v_trunc_f64_e32 v[14:15], s[12:13]
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    v_add_f64 v[4:5], v[4:5], s[20:21]
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[14:15]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s13, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_trunc_f64_e32 v[14:15], s[18:19]
+; CI-NEXT:    v_add_f64 v[10:11], s[12:13], -v[14:15]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[4:5], v[4:5], v[12:13]
+; CI-NEXT:    v_mov_b32_e32 v13, s4
+; CI-NEXT:    v_mov_b32_e32 v16, s15
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[10:11]|, 0.5
+; CI-NEXT:    v_bfi_b32 v13, s2, v13, v16
+; CI-NEXT:    v_trunc_f64_e32 v[16:17], s[18:19]
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    v_add_f64 v[10:11], v[8:9], s[20:21]
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    v_add_f64 v[16:17], s[18:19], -v[14:15]
-; CI-NEXT:    v_add_f64 v[8:9], v[12:13], s[20:21]
-; CI-NEXT:    v_trunc_f64_e32 v[12:13], s[16:17]
-; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[16:17]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s19, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
-; CI-NEXT:    v_add_f64 v[16:17], s[16:17], -v[12:13]
+; CI-NEXT:    v_add_f64 v[10:11], v[8:9], v[12:13]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[8:9], s[18:19], -v[16:17]
+; CI-NEXT:    v_mov_b32_e32 v13, s4
+; CI-NEXT:    v_mov_b32_e32 v18, s13
+; CI-NEXT:    v_bfi_b32 v13, s2, v13, v18
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[8:9]|, 0.5
+; CI-NEXT:    v_trunc_f64_e32 v[18:19], s[16:17]
+; CI-NEXT:    v_add_f64 v[8:9], v[14:15], v[12:13]
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[16:17]|, 0.5
-; CI-NEXT:    s_and_b32 s2, s17, 0x80000000
-; CI-NEXT:    s_or_b32 s2, s2, 0x3ff00000
+; CI-NEXT:    v_add_f64 v[14:15], s[16:17], -v[18:19]
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_mov_b32_e32 v13, s4
+; CI-NEXT:    v_cmp_ge_f64_e64 s[4:5], |v[14:15]|, 0.5
+; CI-NEXT:    v_mov_b32_e32 v20, s19
 ; CI-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; CI-NEXT:    v_add_f64 v[14:15], v[14:15], s[20:21]
-; CI-NEXT:    s_cselect_b32 s21, s2, 0
-; CI-NEXT:    v_add_f64 v[12:13], v[12:13], s[20:21]
+; CI-NEXT:    v_bfi_b32 v13, s2, v13, v20
+; CI-NEXT:    s_cselect_b32 s4, 0x3ff00000, 0
+; CI-NEXT:    v_add_f64 v[14:15], v[16:17], v[12:13]
+; CI-NEXT:    v_mov_b32_e32 v13, s4
+; CI-NEXT:    v_mov_b32_e32 v16, s17
+; CI-NEXT:    v_bfi_b32 v13, s2, v13, v16
+; CI-NEXT:    v_add_f64 v[12:13], v[18:19], v[12:13]
 ; CI-NEXT:    s_mov_b32 s2, -1
 ; CI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:48
 ; CI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:32
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.ll
index 1489092aef5ffdc..2e9197dfd3d9d8f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.ll
@@ -8,37 +8,37 @@
 define amdgpu_kernel void @round_f32(ptr addrspace(1) %out, float %x) #0 {
 ; GFX6-LABEL: round_f32:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dword s4, s[0:1], 0xb
+; GFX6-NEXT:    s_load_dword s6, s[0:1], 0xb
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GFX6-NEXT:    s_brev_b32 s5, -2
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s4
-; GFX6-NEXT:    v_mov_b32_e32 v0, s4
-; GFX6-NEXT:    v_sub_f32_e32 v2, s4, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s5, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_add_f32_e32 v0, v1, v0
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX6-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[4:5]
+; GFX6-NEXT:    s_brev_b32 s4, -2
+; GFX6-NEXT:    v_mov_b32_e32 v2, s6
+; GFX6-NEXT:    v_bfi_b32 v1, s4, v1, v2
+; GFX6-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: round_f32:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX8-NEXT:    s_load_dword s6, s[0:1], 0x2c
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX8-NEXT:    s_brev_b32 s5, -2
 ; GFX8-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX8-NEXT:    s_mov_b32 s2, -1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_trunc_f32_e32 v1, s4
-; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_sub_f32_e32 v2, s4, v1
-; GFX8-NEXT:    v_bfi_b32 v0, s5, 1.0, v0
-; GFX8-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX8-NEXT:    v_add_f32_e32 v0, v1, v0
+; GFX8-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX8-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX8-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, 0.5
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[4:5]
+; GFX8-NEXT:    s_brev_b32 s4, -2
+; GFX8-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NEXT:    v_bfi_b32 v1, s4, v1, v2
+; GFX8-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX8-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX8-NEXT:    s_endpgm
 ;
@@ -46,17 +46,17 @@ define amdgpu_kernel void @round_f32(ptr addrspace(1) %out, float %x) #0 {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    s_brev_b32 s0, -2
 ; GFX9-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX9-NEXT:    s_mov_b32 s6, -1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_trunc_f32_e32 v1, s2
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    v_sub_f32_e32 v2, s2, v1
-; GFX9-NEXT:    v_bfi_b32 v0, s0, 1.0, v0
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX9-NEXT:    v_add_f32_e32 v0, v1, v0
+; GFX9-NEXT:    v_trunc_f32_e32 v0, s2
+; GFX9-NEXT:    v_sub_f32_e32 v1, s2, v0
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, 0.5
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
+; GFX9-NEXT:    s_brev_b32 s0, -2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_bfi_b32 v1, s0, v1, v2
+; GFX9-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX9-NEXT:    s_endpgm
 ;
@@ -65,16 +65,17 @@ define amdgpu_kernel void @round_f32(ptr addrspace(1) %out, float %x) #0 {
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b32 s2, s[0:1], 0x2c
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_trunc_f32_e32 v0, s2
-; GFX11-NEXT:    v_bfi_b32 v2, 0x7fffffff, 1.0, s2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_f32_e32 v1, s2, v0
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s3, |v1|, 0.5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s3
+; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX11-NEXT:    v_bfi_b32 v1, 0x7fffffff, v1, s2
 ; GFX11-NEXT:    s_mov_b32 s2, -1
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v1|, 0.5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_f32_e32 v0, v0, v1
 ; GFX11-NEXT:    buffer_store_b32 v0, off, s[0:3], 0
 ; GFX11-NEXT:    s_nop 0
@@ -83,17 +84,16 @@ define amdgpu_kernel void @round_f32(ptr addrspace(1) %out, float %x) #0 {
 ;
 ; R600-LABEL: round_f32:
 ; R600:       ; %bb.0:
-; R600-NEXT:    ALU 8, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 7, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
 ; R600-NEXT:     TRUNC * T0.W, KC0[2].Z,
 ; R600-NEXT:     ADD * T1.W, KC0[2].Z, -PV.W,
-; R600-NEXT:     BFI_INT T2.W, literal.x, 1.0, KC0[2].Z,
 ; R600-NEXT:     SETGE * T1.W, |PV.W|, 0.5,
+; R600-NEXT:     BFI_INT * T1.W, literal.x, PV.W, KC0[2].Z,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE * T1.W, PS, 0.0, PV.W,
 ; R600-NEXT:     ADD T0.X, T0.W, PV.W,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
@@ -114,22 +114,22 @@ define amdgpu_kernel void @round_v2f32(ptr addrspace(1) %out, <2 x float> %in) #
 ; GFX6-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s3
-; GFX6-NEXT:    v_mov_b32_e32 v0, s3
-; GFX6-NEXT:    v_sub_f32_e32 v2, s3, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_trunc_f32_e32 v2, s2
-; GFX6-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX6-NEXT:    v_mov_b32_e32 v0, s2
-; GFX6-NEXT:    v_sub_f32_e32 v3, s2, v2
-; GFX6-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s3
+; GFX6-NEXT:    v_sub_f32_e32 v1, s3, v0
 ; GFX6-NEXT:    s_mov_b32 s4, s0
 ; GFX6-NEXT:    s_mov_b32 s5, s1
-; GFX6-NEXT:    v_add_f32_e32 v0, v2, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
+; GFX6-NEXT:    v_mov_b32_e32 v2, s3
+; GFX6-NEXT:    v_bfi_b32 v1, s8, v1, v2
+; GFX6-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s2
+; GFX6-NEXT:    v_sub_f32_e32 v2, s2, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v2|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[0:1]
+; GFX6-NEXT:    v_mov_b32_e32 v3, s2
+; GFX6-NEXT:    v_bfi_b32 v2, s8, v2, v3
+; GFX6-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
 ;
@@ -140,22 +140,22 @@ define amdgpu_kernel void @round_v2f32(ptr addrspace(1) %out, <2 x float> %in) #
 ; GFX89-NEXT:    s_mov_b32 s7, 0xf000
 ; GFX89-NEXT:    s_mov_b32 s6, -1
 ; GFX89-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX89-NEXT:    v_trunc_f32_e32 v1, s3
-; GFX89-NEXT:    v_mov_b32_e32 v0, s3
-; GFX89-NEXT:    v_sub_f32_e32 v2, s3, v1
-; GFX89-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX89-NEXT:    v_trunc_f32_e32 v2, s2
-; GFX89-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX89-NEXT:    v_mov_b32_e32 v0, s2
-; GFX89-NEXT:    v_sub_f32_e32 v3, s2, v2
-; GFX89-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX89-NEXT:    v_trunc_f32_e32 v0, s3
+; GFX89-NEXT:    v_sub_f32_e32 v1, s3, v0
 ; GFX89-NEXT:    s_mov_b32 s4, s0
 ; GFX89-NEXT:    s_mov_b32 s5, s1
-; GFX89-NEXT:    v_add_f32_e32 v0, v2, v0
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
+; GFX89-NEXT:    v_mov_b32_e32 v2, s3
+; GFX89-NEXT:    v_bfi_b32 v1, s8, v1, v2
+; GFX89-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX89-NEXT:    v_trunc_f32_e32 v0, s2
+; GFX89-NEXT:    v_sub_f32_e32 v2, s2, v0
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v2|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[0:1]
+; GFX89-NEXT:    v_mov_b32_e32 v3, s2
+; GFX89-NEXT:    v_bfi_b32 v2, s8, v2, v3
+; GFX89-NEXT:    v_add_f32_e32 v0, v0, v2
 ; GFX89-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GFX89-NEXT:    s_endpgm
 ;
@@ -165,18 +165,23 @@ define amdgpu_kernel void @round_v2f32(ptr addrspace(1) %out, <2 x float> %in) #
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_trunc_f32_e32 v0, s3
 ; GFX11-NEXT:    v_trunc_f32_e32 v2, s2
-; GFX11-NEXT:    v_bfi_b32 v3, 0x7fffffff, 1.0, s3
-; GFX11-NEXT:    v_bfi_b32 v5, 0x7fffffff, 1.0, s2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_dual_sub_f32 v1, s3, v0 :: v_dual_sub_f32 v4, s2, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_sub_f32_e32 v1, s3, v0
+; GFX11-NEXT:    v_sub_f32_e32 v3, s2, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s4, |v1|, 0.5
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s4, |v3|, 0.5
+; GFX11-NEXT:    v_bfi_b32 v1, 0x7fffffff, v1, s3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1.0, s4
 ; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX11-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_bfi_b32 v3, 0x7fffffff, v3, s2
 ; GFX11-NEXT:    s_mov_b32 s2, -1
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v1|, 0.5
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v4|, 0.5
-; GFX11-NEXT:    v_cndmask_b32_e32 v3, 0, v5, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_dual_add_f32 v1, v0, v1 :: v_dual_add_f32 v0, v2, v3
+; GFX11-NEXT:    v_add_f32_e32 v0, v2, v3
 ; GFX11-NEXT:    buffer_store_b64 v[0:1], off, s[0:3], 0
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -184,25 +189,23 @@ define amdgpu_kernel void @round_v2f32(ptr addrspace(1) %out, <2 x float> %in) #
 ;
 ; R600-LABEL: round_v2f32:
 ; R600:       ; %bb.0:
-; R600-NEXT:    ALU 15, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 13, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
-; R600-NEXT:     TRUNC T0.W, KC0[3].X,
-; R600-NEXT:     TRUNC * T1.W, KC0[2].W,
-; R600-NEXT:     ADD * T2.W, KC0[3].X, -PV.W,
-; R600-NEXT:     SETGE T0.Z, |PV.W|, 0.5,
-; R600-NEXT:     BFI_INT T2.W, literal.x, 1.0, KC0[3].X,
-; R600-NEXT:     ADD * T3.W, KC0[2].W, -T1.W,
+; R600-NEXT:     TRUNC * T0.W, KC0[3].X,
+; R600-NEXT:     ADD T1.W, KC0[3].X, -PV.W,
+; R600-NEXT:     TRUNC * T2.W, KC0[2].W,
+; R600-NEXT:     ADD T3.W, KC0[2].W, -PS,
+; R600-NEXT:     SETGE * T1.W, |PV.W|, 0.5,
+; R600-NEXT:     BFI_INT T1.W, literal.x, PS, KC0[3].X,
+; R600-NEXT:     SETGE * T3.W, |PV.W|, 0.5,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     SETGE T1.Z, |PS|, 0.5,
-; R600-NEXT:     BFI_INT T3.W, literal.x, 1.0, KC0[2].W,
-; R600-NEXT:     CNDE * T2.W, PV.Z, 0.0, PV.W, BS:VEC_021/SCL_122
+; R600-NEXT:     ADD T0.Y, T0.W, PV.W,
+; R600-NEXT:     BFI_INT * T0.W, literal.x, PS, KC0[2].W,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     ADD T0.Y, T0.W, PS,
-; R600-NEXT:     CNDE * T0.W, PV.Z, 0.0, PV.W,
-; R600-NEXT:     ADD T0.X, T1.W, PV.W,
+; R600-NEXT:     ADD T0.X, T2.W, PV.W,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %result = call <2 x float> @llvm.round.v2f32(<2 x float> %in) #1
@@ -215,38 +218,38 @@ define amdgpu_kernel void @round_v4f32(ptr addrspace(1) %out, <4 x float> %in) #
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GFX6-NEXT:    s_brev_b32 s8, -2
+; GFX6-NEXT:    s_brev_b32 s10, -2
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX6-NEXT:    v_mov_b32_e32 v0, s7
-; GFX6-NEXT:    v_sub_f32_e32 v2, s7, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_add_f32_e32 v3, v1, v0
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s6
-; GFX6-NEXT:    v_mov_b32_e32 v0, s6
-; GFX6-NEXT:    v_sub_f32_e32 v2, s6, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_add_f32_e32 v2, v1, v0
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s5
-; GFX6-NEXT:    v_mov_b32_e32 v0, s5
-; GFX6-NEXT:    v_sub_f32_e32 v4, s5, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_trunc_f32_e32 v4, s4
-; GFX6-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX6-NEXT:    v_mov_b32_e32 v0, s4
-; GFX6-NEXT:    v_sub_f32_e32 v5, s4, v4
-; GFX6-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_add_f32_e32 v0, v4, v0
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX6-NEXT:    v_sub_f32_e32 v1, s7, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v2, s7
+; GFX6-NEXT:    v_bfi_b32 v1, s10, v1, v2
+; GFX6-NEXT:    v_add_f32_e32 v3, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX6-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[8:9]
+; GFX6-NEXT:    v_mov_b32_e32 v2, s6
+; GFX6-NEXT:    v_bfi_b32 v1, s10, v1, v2
+; GFX6-NEXT:    v_add_f32_e32 v2, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s5
+; GFX6-NEXT:    v_sub_f32_e32 v1, s5, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[6:7]
+; GFX6-NEXT:    v_mov_b32_e32 v4, s5
+; GFX6-NEXT:    v_bfi_b32 v1, s10, v1, v4
+; GFX6-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s4
+; GFX6-NEXT:    v_sub_f32_e32 v4, s4, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v4|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v4, 0, 1.0, s[6:7]
+; GFX6-NEXT:    v_mov_b32_e32 v5, s4
+; GFX6-NEXT:    v_bfi_b32 v4, s10, v4, v5
+; GFX6-NEXT:    v_add_f32_e32 v0, v0, v4
 ; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
@@ -254,38 +257,38 @@ define amdgpu_kernel void @round_v4f32(ptr addrspace(1) %out, <4 x float> %in) #
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX8-NEXT:    s_brev_b32 s8, -2
+; GFX8-NEXT:    s_brev_b32 s10, -2
 ; GFX8-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX8-NEXT:    s_mov_b32 s2, -1
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX8-NEXT:    v_mov_b32_e32 v0, s7
-; GFX8-NEXT:    v_sub_f32_e32 v2, s7, v1
-; GFX8-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX8-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX8-NEXT:    v_add_f32_e32 v3, v1, v0
-; GFX8-NEXT:    v_trunc_f32_e32 v1, s6
-; GFX8-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NEXT:    v_sub_f32_e32 v2, s6, v1
-; GFX8-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX8-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX8-NEXT:    v_add_f32_e32 v2, v1, v0
-; GFX8-NEXT:    v_trunc_f32_e32 v1, s5
-; GFX8-NEXT:    v_mov_b32_e32 v0, s5
-; GFX8-NEXT:    v_sub_f32_e32 v4, s5, v1
-; GFX8-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX8-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX8-NEXT:    v_trunc_f32_e32 v4, s4
-; GFX8-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_sub_f32_e32 v5, s4, v4
-; GFX8-NEXT:    v_bfi_b32 v0, s8, 1.0, v0
-; GFX8-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX8-NEXT:    v_add_f32_e32 v0, v4, v0
+; GFX8-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX8-NEXT:    v_sub_f32_e32 v1, s7, v0
+; GFX8-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, 0.5
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[8:9]
+; GFX8-NEXT:    v_mov_b32_e32 v2, s7
+; GFX8-NEXT:    v_bfi_b32 v1, s10, v1, v2
+; GFX8-NEXT:    v_add_f32_e32 v3, v0, v1
+; GFX8-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX8-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX8-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, 0.5
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[8:9]
+; GFX8-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NEXT:    v_bfi_b32 v1, s10, v1, v2
+; GFX8-NEXT:    v_add_f32_e32 v2, v0, v1
+; GFX8-NEXT:    v_trunc_f32_e32 v0, s5
+; GFX8-NEXT:    v_sub_f32_e32 v1, s5, v0
+; GFX8-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, 0.5
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[6:7]
+; GFX8-NEXT:    v_mov_b32_e32 v4, s5
+; GFX8-NEXT:    v_bfi_b32 v1, s10, v1, v4
+; GFX8-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX8-NEXT:    v_trunc_f32_e32 v0, s4
+; GFX8-NEXT:    v_sub_f32_e32 v4, s4, v0
+; GFX8-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v4|, 0.5
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, 0, 1.0, s[6:7]
+; GFX8-NEXT:    v_mov_b32_e32 v5, s4
+; GFX8-NEXT:    v_bfi_b32 v4, s10, v4, v5
+; GFX8-NEXT:    v_add_f32_e32 v0, v0, v4
 ; GFX8-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX8-NEXT:    s_endpgm
 ;
@@ -293,38 +296,38 @@ define amdgpu_kernel void @round_v4f32(ptr addrspace(1) %out, <4 x float> %in) #
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x34
 ; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x24
-; GFX9-NEXT:    s_brev_b32 s0, -2
+; GFX9-NEXT:    s_brev_b32 s2, -2
 ; GFX9-NEXT:    s_mov_b32 s11, 0xf000
 ; GFX9-NEXT:    s_mov_b32 s10, -1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_sub_f32_e32 v2, s7, v1
-; GFX9-NEXT:    v_bfi_b32 v0, s0, 1.0, v0
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX9-NEXT:    v_add_f32_e32 v3, v1, v0
-; GFX9-NEXT:    v_trunc_f32_e32 v1, s6
-; GFX9-NEXT:    v_mov_b32_e32 v0, s6
-; GFX9-NEXT:    v_sub_f32_e32 v2, s6, v1
-; GFX9-NEXT:    v_bfi_b32 v0, s0, 1.0, v0
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX9-NEXT:    v_add_f32_e32 v2, v1, v0
-; GFX9-NEXT:    v_trunc_f32_e32 v1, s5
-; GFX9-NEXT:    v_mov_b32_e32 v0, s5
-; GFX9-NEXT:    v_sub_f32_e32 v4, s5, v1
-; GFX9-NEXT:    v_bfi_b32 v0, s0, 1.0, v0
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, 0.5
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX9-NEXT:    v_trunc_f32_e32 v4, s4
-; GFX9-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:    v_sub_f32_e32 v5, s4, v4
-; GFX9-NEXT:    v_bfi_b32 v0, s0, 1.0, v0
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, 0.5
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX9-NEXT:    v_add_f32_e32 v0, v4, v0
+; GFX9-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX9-NEXT:    v_sub_f32_e32 v1, s7, v0
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, 0.5
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v2, s7
+; GFX9-NEXT:    v_bfi_b32 v1, s2, v1, v2
+; GFX9-NEXT:    v_add_f32_e32 v3, v0, v1
+; GFX9-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX9-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, 0.5
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v2, s6
+; GFX9-NEXT:    v_bfi_b32 v1, s2, v1, v2
+; GFX9-NEXT:    v_add_f32_e32 v2, v0, v1
+; GFX9-NEXT:    v_trunc_f32_e32 v0, s5
+; GFX9-NEXT:    v_sub_f32_e32 v1, s5, v0
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, 0.5
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v4, s5
+; GFX9-NEXT:    v_bfi_b32 v1, s2, v1, v4
+; GFX9-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX9-NEXT:    v_trunc_f32_e32 v0, s4
+; GFX9-NEXT:    v_sub_f32_e32 v4, s4, v0
+; GFX9-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, 0.5
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, 1.0, s[0:1]
+; GFX9-NEXT:    v_mov_b32_e32 v5, s4
+; GFX9-NEXT:    v_bfi_b32 v4, s2, v4, v5
+; GFX9-NEXT:    v_add_f32_e32 v0, v0, v4
 ; GFX9-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
 ; GFX9-NEXT:    s_endpgm
 ;
@@ -334,31 +337,35 @@ define amdgpu_kernel void @round_v4f32(ptr addrspace(1) %out, <4 x float> %in) #
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x34
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
 ; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-NEXT:    s_mov_b32 s2, -1
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, 1.0, s7
-; GFX11-NEXT:    v_trunc_f32_e32 v4, s6
-; GFX11-NEXT:    v_trunc_f32_e32 v5, s5
-; GFX11-NEXT:    v_bfi_b32 v2, 0x7fffffff, 1.0, s6
-; GFX11-NEXT:    v_sub_f32_e32 v7, s7, v1
-; GFX11-NEXT:    v_trunc_f32_e32 v6, s4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_dual_sub_f32 v9, s6, v4 :: v_dual_sub_f32 v10, s5, v5
-; GFX11-NEXT:    v_bfi_b32 v3, 0x7fffffff, 1.0, s5
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v7|, 0.5
-; GFX11-NEXT:    v_bfi_b32 v8, 0x7fffffff, 1.0, s4
-; GFX11-NEXT:    v_dual_sub_f32 v11, s4, v6 :: v_dual_cndmask_b32 v0, 0, v0
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v9|, 0.5
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v10|, 0.5
-; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v11|, 0.5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_dual_add_f32 v3, v1, v0 :: v_dual_add_f32 v2, v4, v2
-; GFX11-NEXT:    v_dual_cndmask_b32 v8, 0, v8 :: v_dual_add_f32 v1, v5, v7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_add_f32_e32 v0, v6, v8
+; GFX11-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX11-NEXT:    v_trunc_f32_e32 v1, s6
+; GFX11-NEXT:    v_trunc_f32_e32 v4, s5
+; GFX11-NEXT:    v_trunc_f32_e32 v5, s4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_dual_sub_f32 v2, s7, v0 :: v_dual_sub_f32 v3, s6, v1
+; GFX11-NEXT:    v_dual_sub_f32 v6, s5, v4 :: v_dual_sub_f32 v7, s4, v5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v2|, 0.5
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v3|, 0.5
+; GFX11-NEXT:    v_bfi_b32 v2, 0x7fffffff, v2, s7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v6|, 0.5
+; GFX11-NEXT:    v_bfi_b32 v8, 0x7fffffff, v3, s6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v7|, 0.5
+; GFX11-NEXT:    v_dual_add_f32 v3, v0, v2 :: v_dual_add_f32 v2, v1, v8
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_bfi_b32 v6, 0x7fffffff, v6, s5
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, 0, 1.0, s2
+; GFX11-NEXT:    s_mov_b32 s2, -1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_bfi_b32 v7, 0x7fffffff, v7, s4
+; GFX11-NEXT:    v_dual_add_f32 v1, v4, v6 :: v_dual_add_f32 v0, v5, v7
 ; GFX11-NEXT:    buffer_store_b128 v[0:3], off, s[0:3], 0
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -366,40 +373,36 @@ define amdgpu_kernel void @round_v4f32(ptr addrspace(1) %out, <4 x float> %in) #
 ;
 ; R600-LABEL: round_v4f32:
 ; R600:       ; %bb.0:
-; R600-NEXT:    ALU 29, @4, KC0[CB0:0-32], KC1[]
-; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
+; R600-NEXT:    ALU 25, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T4.XYZW, T0.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
 ; R600-NEXT:    ALU clause starting at 4:
 ; R600-NEXT:     TRUNC * T0.W, KC0[4].X,
 ; R600-NEXT:     ADD T1.W, KC0[4].X, -PV.W,
 ; R600-NEXT:     TRUNC * T2.W, KC0[3].W,
-; R600-NEXT:     TRUNC T0.Y, KC0[3].Z,
-; R600-NEXT:     ADD T0.Z, KC0[3].W, -PS,
-; R600-NEXT:     BFI_INT T3.W, literal.x, 1.0, KC0[4].X,
+; R600-NEXT:     TRUNC T0.Z, KC0[3].Z,
+; R600-NEXT:     ADD T3.W, KC0[3].W, -PS,
 ; R600-NEXT:     SETGE * T1.W, |PV.W|, 0.5,
+; R600-NEXT:     BFI_INT T0.Y, literal.x, PS, KC0[4].X,
+; R600-NEXT:     SETGE T1.Z, |PV.W|, 0.5,
+; R600-NEXT:     ADD * T1.W, KC0[3].Z, -PV.Z,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE T0.X, PS, 0.0, PV.W,
-; R600-NEXT:     TRUNC T1.Y, KC0[3].Y,
-; R600-NEXT:     SETGE T0.Z, |PV.Z|, 0.5,
-; R600-NEXT:     BFI_INT T1.W, literal.x, 1.0, KC0[3].W,
-; R600-NEXT:     ADD * T3.W, KC0[3].Z, -PV.Y,
+; R600-NEXT:     TRUNC * T3.W, KC0[3].Y,
+; R600-NEXT:     ADD T1.Y, KC0[3].Y, -PV.W,
+; R600-NEXT:     SETGE T2.Z, |T1.W|, 0.5,
+; R600-NEXT:     BFI_INT T1.W, literal.x, T1.Z, KC0[3].W,
+; R600-NEXT:     ADD * T4.W, T0.W, T0.Y,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     SETGE T1.X, |PS|, 0.5,
-; R600-NEXT:     BFI_INT T2.Y, literal.x, 1.0, KC0[3].Z,
-; R600-NEXT:     CNDE T0.Z, PV.Z, 0.0, PV.W,
-; R600-NEXT:     ADD T1.W, KC0[3].Y, -PV.Y,
-; R600-NEXT:     ADD * T0.W, T0.W, PV.X,
+; R600-NEXT:     ADD T4.Z, T2.W, PV.W,
+; R600-NEXT:     BFI_INT T0.W, literal.x, PV.Z, KC0[3].Z,
+; R600-NEXT:     SETGE * T1.W, |PV.Y|, 0.5,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     SETGE T3.Y, |PV.W|, 0.5,
-; R600-NEXT:     ADD T0.Z, T2.W, PV.Z,
-; R600-NEXT:     BFI_INT T1.W, literal.x, 1.0, KC0[3].Y,
-; R600-NEXT:     CNDE * T2.W, PV.X, 0.0, PV.Y, BS:VEC_021/SCL_122
+; R600-NEXT:     ADD T4.Y, T0.Z, PV.W,
+; R600-NEXT:     BFI_INT * T0.W, literal.x, PS, KC0[3].Y,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     ADD T0.Y, T0.Y, PS,
-; R600-NEXT:     CNDE * T1.W, PV.Y, 0.0, PV.W,
-; R600-NEXT:     ADD T0.X, T1.Y, PV.W,
-; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT:     ADD T4.X, T3.W, PV.W,
+; R600-NEXT:     LSHR * T0.X, KC0[2].Y, literal.x,
 ; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
   %result = call <4 x float> @llvm.round.v4f32(<4 x float> %in) #1
   store <4 x float> %result, ptr addrspace(1) %out
@@ -410,67 +413,67 @@ define amdgpu_kernel void @round_v8f32(ptr addrspace(1) %out, <8 x float> %in) #
 ; GFX6-LABEL: round_v8f32:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x11
-; GFX6-NEXT:    s_brev_b32 s12, -2
+; GFX6-NEXT:    s_brev_b32 s14, -2
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX6-NEXT:    v_mov_b32_e32 v0, s7
-; GFX6-NEXT:    v_sub_f32_e32 v2, s7, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_add_f32_e32 v3, v1, v0
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s6
-; GFX6-NEXT:    v_mov_b32_e32 v0, s6
-; GFX6-NEXT:    v_sub_f32_e32 v2, s6, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_add_f32_e32 v2, v1, v0
-; GFX6-NEXT:    v_trunc_f32_e32 v1, s5
-; GFX6-NEXT:    v_mov_b32_e32 v0, s5
-; GFX6-NEXT:    v_sub_f32_e32 v4, s5, v1
-; GFX6-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_trunc_f32_e32 v4, s4
-; GFX6-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX6-NEXT:    v_mov_b32_e32 v0, s4
-; GFX6-NEXT:    v_sub_f32_e32 v5, s4, v4
-; GFX6-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX6-NEXT:    v_trunc_f32_e32 v5, s11
-; GFX6-NEXT:    v_add_f32_e32 v0, v4, v0
-; GFX6-NEXT:    v_mov_b32_e32 v4, s11
-; GFX6-NEXT:    v_sub_f32_e32 v6, s11, v5
-; GFX6-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT:    v_add_f32_e32 v7, v5, v4
-; GFX6-NEXT:    v_trunc_f32_e32 v5, s10
-; GFX6-NEXT:    v_mov_b32_e32 v4, s10
-; GFX6-NEXT:    v_sub_f32_e32 v6, s10, v5
-; GFX6-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT:    v_add_f32_e32 v6, v5, v4
-; GFX6-NEXT:    v_trunc_f32_e32 v5, s9
-; GFX6-NEXT:    v_mov_b32_e32 v4, s9
-; GFX6-NEXT:    v_sub_f32_e32 v8, s9, v5
-; GFX6-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v8|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT:    v_trunc_f32_e32 v8, s8
-; GFX6-NEXT:    v_add_f32_e32 v5, v5, v4
-; GFX6-NEXT:    v_mov_b32_e32 v4, s8
-; GFX6-NEXT:    v_sub_f32_e32 v9, s8, v8
-; GFX6-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v9|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX6-NEXT:    v_add_f32_e32 v4, v8, v4
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX6-NEXT:    v_sub_f32_e32 v1, s7, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[12:13], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[12:13]
+; GFX6-NEXT:    v_mov_b32_e32 v2, s7
+; GFX6-NEXT:    v_bfi_b32 v1, s14, v1, v2
+; GFX6-NEXT:    v_add_f32_e32 v3, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX6-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[12:13], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[12:13]
+; GFX6-NEXT:    v_mov_b32_e32 v2, s6
+; GFX6-NEXT:    v_bfi_b32 v1, s14, v1, v2
+; GFX6-NEXT:    v_add_f32_e32 v2, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s5
+; GFX6-NEXT:    v_sub_f32_e32 v1, s5, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[6:7]
+; GFX6-NEXT:    v_mov_b32_e32 v4, s5
+; GFX6-NEXT:    v_bfi_b32 v1, s14, v1, v4
+; GFX6-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX6-NEXT:    v_trunc_f32_e32 v0, s4
+; GFX6-NEXT:    v_sub_f32_e32 v4, s4, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v4|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v4, 0, 1.0, s[6:7]
+; GFX6-NEXT:    v_mov_b32_e32 v5, s4
+; GFX6-NEXT:    v_bfi_b32 v4, s14, v4, v5
+; GFX6-NEXT:    v_add_f32_e32 v0, v0, v4
+; GFX6-NEXT:    v_trunc_f32_e32 v4, s11
+; GFX6-NEXT:    v_sub_f32_e32 v5, s11, v4
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v5|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[4:5]
+; GFX6-NEXT:    v_mov_b32_e32 v6, s11
+; GFX6-NEXT:    v_bfi_b32 v5, s14, v5, v6
+; GFX6-NEXT:    v_add_f32_e32 v7, v4, v5
+; GFX6-NEXT:    v_trunc_f32_e32 v4, s10
+; GFX6-NEXT:    v_sub_f32_e32 v5, s10, v4
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v5|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[4:5]
+; GFX6-NEXT:    v_mov_b32_e32 v6, s10
+; GFX6-NEXT:    v_bfi_b32 v5, s14, v5, v6
+; GFX6-NEXT:    v_add_f32_e32 v6, v4, v5
+; GFX6-NEXT:    v_trunc_f32_e32 v4, s9
+; GFX6-NEXT:    v_sub_f32_e32 v5, s9, v4
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v5|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[4:5]
+; GFX6-NEXT:    v_mov_b32_e32 v8, s9
+; GFX6-NEXT:    v_bfi_b32 v5, s14, v5, v8
+; GFX6-NEXT:    v_add_f32_e32 v5, v4, v5
+; GFX6-NEXT:    v_trunc_f32_e32 v4, s8
+; GFX6-NEXT:    v_sub_f32_e32 v8, s8, v4
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v8|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, 1.0, s[4:5]
+; GFX6-NEXT:    v_mov_b32_e32 v9, s8
+; GFX6-NEXT:    v_bfi_b32 v8, s14, v8, v9
+; GFX6-NEXT:    v_add_f32_e32 v4, v4, v8
 ; GFX6-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
@@ -478,67 +481,67 @@ define amdgpu_kernel void @round_v8f32(ptr addrspace(1) %out, <8 x float> %in) #
 ; GFX89-LABEL: round_v8f32:
 ; GFX89:       ; %bb.0:
 ; GFX89-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0x44
-; GFX89-NEXT:    s_brev_b32 s12, -2
+; GFX89-NEXT:    s_brev_b32 s14, -2
 ; GFX89-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX89-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX89-NEXT:    s_mov_b32 s2, -1
 ; GFX89-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX89-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX89-NEXT:    v_mov_b32_e32 v0, s7
-; GFX89-NEXT:    v_sub_f32_e32 v2, s7, v1
-; GFX89-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX89-NEXT:    v_add_f32_e32 v3, v1, v0
-; GFX89-NEXT:    v_trunc_f32_e32 v1, s6
-; GFX89-NEXT:    v_mov_b32_e32 v0, s6
-; GFX89-NEXT:    v_sub_f32_e32 v2, s6, v1
-; GFX89-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX89-NEXT:    v_add_f32_e32 v2, v1, v0
-; GFX89-NEXT:    v_trunc_f32_e32 v1, s5
-; GFX89-NEXT:    v_mov_b32_e32 v0, s5
-; GFX89-NEXT:    v_sub_f32_e32 v4, s5, v1
-; GFX89-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX89-NEXT:    v_trunc_f32_e32 v4, s4
-; GFX89-NEXT:    v_add_f32_e32 v1, v1, v0
-; GFX89-NEXT:    v_mov_b32_e32 v0, s4
-; GFX89-NEXT:    v_sub_f32_e32 v5, s4, v4
-; GFX89-NEXT:    v_bfi_b32 v0, s12, 1.0, v0
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GFX89-NEXT:    v_trunc_f32_e32 v5, s11
-; GFX89-NEXT:    v_add_f32_e32 v0, v4, v0
-; GFX89-NEXT:    v_mov_b32_e32 v4, s11
-; GFX89-NEXT:    v_sub_f32_e32 v6, s11, v5
-; GFX89-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX89-NEXT:    v_add_f32_e32 v7, v5, v4
-; GFX89-NEXT:    v_trunc_f32_e32 v5, s10
-; GFX89-NEXT:    v_mov_b32_e32 v4, s10
-; GFX89-NEXT:    v_sub_f32_e32 v6, s10, v5
-; GFX89-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v6|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX89-NEXT:    v_add_f32_e32 v6, v5, v4
-; GFX89-NEXT:    v_trunc_f32_e32 v5, s9
-; GFX89-NEXT:    v_mov_b32_e32 v4, s9
-; GFX89-NEXT:    v_sub_f32_e32 v8, s9, v5
-; GFX89-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v8|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX89-NEXT:    v_trunc_f32_e32 v8, s8
-; GFX89-NEXT:    v_add_f32_e32 v5, v5, v4
-; GFX89-NEXT:    v_mov_b32_e32 v4, s8
-; GFX89-NEXT:    v_sub_f32_e32 v9, s8, v8
-; GFX89-NEXT:    v_bfi_b32 v4, s12, 1.0, v4
-; GFX89-NEXT:    v_cmp_ge_f32_e64 vcc, |v9|, 0.5
-; GFX89-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GFX89-NEXT:    v_add_f32_e32 v4, v8, v4
+; GFX89-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX89-NEXT:    v_sub_f32_e32 v1, s7, v0
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[12:13], |v1|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[12:13]
+; GFX89-NEXT:    v_mov_b32_e32 v2, s7
+; GFX89-NEXT:    v_bfi_b32 v1, s14, v1, v2
+; GFX89-NEXT:    v_add_f32_e32 v3, v0, v1
+; GFX89-NEXT:    v_trunc_f32_e32 v0, s6
+; GFX89-NEXT:    v_sub_f32_e32 v1, s6, v0
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[12:13], |v1|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[12:13]
+; GFX89-NEXT:    v_mov_b32_e32 v2, s6
+; GFX89-NEXT:    v_bfi_b32 v1, s14, v1, v2
+; GFX89-NEXT:    v_add_f32_e32 v2, v0, v1
+; GFX89-NEXT:    v_trunc_f32_e32 v0, s5
+; GFX89-NEXT:    v_sub_f32_e32 v1, s5, v0
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v1, 0, 1.0, s[6:7]
+; GFX89-NEXT:    v_mov_b32_e32 v4, s5
+; GFX89-NEXT:    v_bfi_b32 v1, s14, v1, v4
+; GFX89-NEXT:    v_add_f32_e32 v1, v0, v1
+; GFX89-NEXT:    v_trunc_f32_e32 v0, s4
+; GFX89-NEXT:    v_sub_f32_e32 v4, s4, v0
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v4|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v4, 0, 1.0, s[6:7]
+; GFX89-NEXT:    v_mov_b32_e32 v5, s4
+; GFX89-NEXT:    v_bfi_b32 v4, s14, v4, v5
+; GFX89-NEXT:    v_add_f32_e32 v0, v0, v4
+; GFX89-NEXT:    v_trunc_f32_e32 v4, s11
+; GFX89-NEXT:    v_sub_f32_e32 v5, s11, v4
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v5|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[4:5]
+; GFX89-NEXT:    v_mov_b32_e32 v6, s11
+; GFX89-NEXT:    v_bfi_b32 v5, s14, v5, v6
+; GFX89-NEXT:    v_add_f32_e32 v7, v4, v5
+; GFX89-NEXT:    v_trunc_f32_e32 v4, s10
+; GFX89-NEXT:    v_sub_f32_e32 v5, s10, v4
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v5|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[4:5]
+; GFX89-NEXT:    v_mov_b32_e32 v6, s10
+; GFX89-NEXT:    v_bfi_b32 v5, s14, v5, v6
+; GFX89-NEXT:    v_add_f32_e32 v6, v4, v5
+; GFX89-NEXT:    v_trunc_f32_e32 v4, s9
+; GFX89-NEXT:    v_sub_f32_e32 v5, s9, v4
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v5|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[4:5]
+; GFX89-NEXT:    v_mov_b32_e32 v8, s9
+; GFX89-NEXT:    v_bfi_b32 v5, s14, v5, v8
+; GFX89-NEXT:    v_add_f32_e32 v5, v4, v5
+; GFX89-NEXT:    v_trunc_f32_e32 v4, s8
+; GFX89-NEXT:    v_sub_f32_e32 v8, s8, v4
+; GFX89-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v8|, 0.5
+; GFX89-NEXT:    v_cndmask_b32_e64 v8, 0, 1.0, s[4:5]
+; GFX89-NEXT:    v_mov_b32_e32 v9, s8
+; GFX89-NEXT:    v_bfi_b32 v8, s14, v8, v9
+; GFX89-NEXT:    v_add_f32_e32 v4, v4, v8
 ; GFX89-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; GFX89-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX89-NEXT:    s_endpgm
@@ -549,55 +552,66 @@ define amdgpu_kernel void @round_v8f32(ptr addrspace(1) %out, <8 x float> %in) #
 ; GFX11-NEXT:    s_load_b256 s[4:11], s[0:1], 0x44
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
 ; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-NEXT:    s_mov_b32 s2, -1
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_trunc_f32_e32 v1, s7
-; GFX11-NEXT:    v_trunc_f32_e32 v4, s6
-; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, 1.0, s7
-; GFX11-NEXT:    v_trunc_f32_e32 v5, s5
-; GFX11-NEXT:    v_bfi_b32 v2, 0x7fffffff, 1.0, s6
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_dual_sub_f32 v16, s7, v1 :: v_dual_sub_f32 v17, s6, v4
+; GFX11-NEXT:    v_trunc_f32_e32 v0, s7
+; GFX11-NEXT:    v_trunc_f32_e32 v1, s6
+; GFX11-NEXT:    v_trunc_f32_e32 v4, s5
 ; GFX11-NEXT:    v_trunc_f32_e32 v8, s4
-; GFX11-NEXT:    v_bfi_b32 v7, 0x7fffffff, 1.0, s11
-; GFX11-NEXT:    v_trunc_f32_e32 v9, s11
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v16|, 0.5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT:    v_dual_sub_f32 v18, s5, v5 :: v_dual_sub_f32 v19, s4, v8
-; GFX11-NEXT:    v_bfi_b32 v3, 0x7fffffff, 1.0, s5
-; GFX11-NEXT:    v_trunc_f32_e32 v11, s10
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v17|, 0.5
-; GFX11-NEXT:    v_sub_f32_e32 v20, s11, v9
-; GFX11-NEXT:    v_trunc_f32_e32 v13, s9
-; GFX11-NEXT:    v_sub_f32_e32 v21, s10, v11
-; GFX11-NEXT:    v_bfi_b32 v10, 0x7fffffff, 1.0, s10
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v18|, 0.5
-; GFX11-NEXT:    v_trunc_f32_e32 v15, s8
-; GFX11-NEXT:    v_sub_f32_e32 v22, s9, v13
-; GFX11-NEXT:    v_bfi_b32 v12, 0x7fffffff, 1.0, s9
-; GFX11-NEXT:    v_bfi_b32 v14, 0x7fffffff, 1.0, s8
-; GFX11-NEXT:    v_cndmask_b32_e32 v16, 0, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v20|, 0.5
-; GFX11-NEXT:    v_sub_f32_e32 v23, s8, v15
-; GFX11-NEXT:    v_bfi_b32 v6, 0x7fffffff, 1.0, s4
-; GFX11-NEXT:    v_cndmask_b32_e32 v7, 0, v7, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v21|, 0.5
-; GFX11-NEXT:    v_dual_add_f32 v3, v1, v0 :: v_dual_add_f32 v2, v4, v2
-; GFX11-NEXT:    v_add_f32_e32 v1, v5, v16
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_dual_add_f32 v7, v9, v7 :: v_dual_cndmask_b32 v0, 0, v10
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v22|, 0.5
-; GFX11-NEXT:    v_cndmask_b32_e32 v4, 0, v12, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v23|, 0.5
-; GFX11-NEXT:    v_dual_add_f32 v5, v13, v4 :: v_dual_cndmask_b32 v10, 0, v14
-; GFX11-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v19|, 0.5
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_f32_e32 v4, v15, v10
-; GFX11-NEXT:    v_cndmask_b32_e32 v12, 0, v6, vcc_lo
-; GFX11-NEXT:    v_add_f32_e32 v6, v11, v0
-; GFX11-NEXT:    v_add_f32_e32 v0, v8, v12
+; GFX11-NEXT:    v_trunc_f32_e32 v5, s11
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_dual_sub_f32 v2, s7, v0 :: v_dual_sub_f32 v3, s6, v1
+; GFX11-NEXT:    v_sub_f32_e32 v7, s5, v4
+; GFX11-NEXT:    v_trunc_f32_e32 v9, s9
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_sub_f32_e32 v12, s11, v5
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v2|, 0.5
+; GFX11-NEXT:    v_sub_f32_e32 v11, s4, v8
+; GFX11-NEXT:    v_trunc_f32_e32 v6, s10
+; GFX11-NEXT:    v_sub_f32_e32 v14, s9, v9
+; GFX11-NEXT:    v_trunc_f32_e32 v10, s8
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v3|, 0.5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_bfi_b32 v2, 0x7fffffff, v2, s7
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v7|, 0.5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_bfi_b32 v16, 0x7fffffff, v3, s6
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v11|, 0.5
+; GFX11-NEXT:    v_sub_f32_e32 v13, s10, v6
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_dual_add_f32 v3, v0, v2 :: v_dual_add_f32 v2, v1, v16
+; GFX11-NEXT:    v_bfi_b32 v7, 0x7fffffff, v7, s5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v12|, 0.5
+; GFX11-NEXT:    v_add_f32_e32 v1, v4, v7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_bfi_b32 v11, 0x7fffffff, v11, s4
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v13|, 0.5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_bfi_b32 v12, 0x7fffffff, v12, s11
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, 0, 1.0, s2
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v14|, 0.5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_add_f32_e32 v7, v5, v12
+; GFX11-NEXT:    v_bfi_b32 v13, 0x7fffffff, v13, s10
+; GFX11-NEXT:    v_sub_f32_e32 v15, s8, v10
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, 0, 1.0, s2
+; GFX11-NEXT:    v_add_f32_e32 v6, v6, v13
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_cmp_ge_f32_e64 s2, |v15|, 0.5
+; GFX11-NEXT:    v_bfi_b32 v0, 0x7fffffff, v14, s9
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, 0, 1.0, s2
+; GFX11-NEXT:    v_dual_add_f32 v5, v9, v0 :: v_dual_add_f32 v0, v8, v11
+; GFX11-NEXT:    s_mov_b32 s2, -1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_bfi_b32 v4, 0x7fffffff, v15, s8
+; GFX11-NEXT:    v_add_f32_e32 v4, v10, v4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    buffer_store_b128 v[4:7], off, s[0:3], 0 offset:16
 ; GFX11-NEXT:    buffer_store_b128 v[0:3], off, s[0:3], 0
@@ -607,68 +621,58 @@ define amdgpu_kernel void @round_v8f32(ptr addrspace(1) %out, <8 x float> %in) #
 ;
 ; R600-LABEL: round_v8f32:
 ; R600:       ; %bb.0:
-; R600-NEXT:    ALU 60, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 50, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T2.X, 0
-; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T5.XYZW, T1.X, 1
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T3.XYZW, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    ALU clause starting at 4:
 ; R600-NEXT:     TRUNC * T0.W, KC0[6].X,
-; R600-NEXT:     BFI_INT T0.X, literal.x, 1.0, KC0[5].Z,
-; R600-NEXT:     BFI_INT T0.Y, literal.x, 1.0, KC0[4].Y,
-; R600-NEXT:     TRUNC T0.Z, KC0[4].Y,
-; R600-NEXT:     TRUNC * T1.W, KC0[5].Z,
-; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     ADD * T2.W, KC0[6].X, -T0.W,
-; R600-NEXT:     TRUNC T1.Y, KC0[5].X,
-; R600-NEXT:     SETGE T1.Z, |PV.W|, 0.5,
-; R600-NEXT:     BFI_INT * T2.W, literal.x, 1.0, KC0[6].X,
-; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     ADD * T3.W, KC0[5].Z, -T1.W,
-; R600-NEXT:     SETGE T1.X, |PV.W|, 0.5,
-; R600-NEXT:     CNDE T2.Y, T1.Z, 0.0, T2.W,
-; R600-NEXT:     ADD T1.Z, KC0[5].X, -T1.Y,
+; R600-NEXT:     ADD T0.Z, KC0[6].X, -PV.W,
+; R600-NEXT:     TRUNC * T1.W, KC0[5].X,
 ; R600-NEXT:     TRUNC * T2.W, KC0[4].W,
-; R600-NEXT:     ADD * T3.W, KC0[4].Y, -T0.Z,
-; R600-NEXT:     SETGE T2.X, |PV.W|, 0.5,
-; R600-NEXT:     ADD T3.Y, KC0[4].W, -T2.W,
-; R600-NEXT:     TRUNC T2.Z, KC0[4].Z,
-; R600-NEXT:     BFI_INT T3.W, literal.x, 1.0, KC0[5].X,
-; R600-NEXT:     SETGE * T4.W, |T1.Z|, 0.5,
+; R600-NEXT:     ADD T1.Z, KC0[4].W, -PV.W,
+; R600-NEXT:     ADD T3.W, KC0[5].X, -T1.W,
+; R600-NEXT:     SETGE * T4.W, |T0.Z|, 0.5,
+; R600-NEXT:     BFI_INT T0.Y, literal.x, PS, KC0[6].X,
+; R600-NEXT:     SETGE T0.Z, |PV.W|, 0.5,
+; R600-NEXT:     SETGE T3.W, |PV.Z|, 0.5,
+; R600-NEXT:     TRUNC * T4.W, KC0[5].Y,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE T3.X, PS, 0.0, PV.W,
-; R600-NEXT:     ADD T4.Y, KC0[4].Z, -PV.Z,
-; R600-NEXT:     SETGE T1.Z, |PV.Y|, 0.5,
-; R600-NEXT:     BFI_INT T3.W, literal.x, 1.0, KC0[4].W,
-; R600-NEXT:     TRUNC * T4.W, KC0[5].W,
+; R600-NEXT:     ADD T1.Y, KC0[5].Y, -PS,
+; R600-NEXT:     BFI_INT T1.Z, literal.x, PV.W, KC0[4].W,
+; R600-NEXT:     BFI_INT T3.W, literal.x, PV.Z, KC0[5].X,
+; R600-NEXT:     TRUNC * T5.W, KC0[4].Z,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     ADD T4.X, KC0[5].W, -PS,
-; R600-NEXT:     CNDE T3.Y, PV.Z, 0.0, PV.W,
-; R600-NEXT:     SETGE T1.Z, |PV.Y|, 0.5,
-; R600-NEXT:     BFI_INT T3.W, literal.x, 1.0, KC0[4].Z,
-; R600-NEXT:     ADD * T5.W, T1.Y, PV.X,
+; R600-NEXT:     TRUNC T0.Z, KC0[4].Y,
+; R600-NEXT:     TRUNC * T6.W, KC0[5].W,
+; R600-NEXT:     ADD * T7.W, KC0[4].Z, -T5.W,
+; R600-NEXT:     TRUNC T0.X, KC0[5].Z,
+; R600-NEXT:     SETGE T2.Y, |PV.W|, 0.5,
+; R600-NEXT:     ADD T2.Z, KC0[5].W, -T6.W, BS:VEC_102/SCL_221
+; R600-NEXT:     ADD T7.W, KC0[4].Y, -T0.Z,
+; R600-NEXT:     ADD * T3.W, T1.W, T3.W,
+; R600-NEXT:     SETGE T1.X, |PV.W|, 0.5,
+; R600-NEXT:     SETGE T4.Y, |PV.Z|, 0.5,
+; R600-NEXT:     ADD T3.Z, T2.W, T1.Z,
+; R600-NEXT:     BFI_INT T1.W, literal.x, PV.Y, KC0[4].Z,
+; R600-NEXT:     ADD * T2.W, KC0[5].Z, -PV.X,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     TRUNC T3.X, KC0[5].Y,
-; R600-NEXT:     CNDE T1.Y, PV.Z, 0.0, PV.W,
-; R600-NEXT:     ADD T5.Z, T2.W, PV.Y,
-; R600-NEXT:     BFI_INT T2.W, literal.x, 1.0, KC0[5].W,
-; R600-NEXT:     SETGE * T3.W, |PV.X|, 0.5,
+; R600-NEXT:     SETGE T2.X, |PS|, 0.5,
+; R600-NEXT:     ADD T3.Y, T5.W, PV.W,
+; R600-NEXT:     BFI_INT T1.Z, literal.x, PV.Y, KC0[5].W,
+; R600-NEXT:     BFI_INT T1.W, literal.x, PV.X, KC0[4].Y,
+; R600-NEXT:     ADD * T0.W, T0.W, T0.Y,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE T4.X, PS, 0.0, PV.W,
-; R600-NEXT:     ADD T5.Y, T2.Z, PV.Y,
-; R600-NEXT:     ADD T1.Z, KC0[5].Y, -PV.X,
-; R600-NEXT:     CNDE T2.W, T2.X, 0.0, T0.Y,
-; R600-NEXT:     ADD * T0.W, T0.W, T2.Y,
-; R600-NEXT:     ADD T5.X, T0.Z, PV.W,
-; R600-NEXT:     SETGE T1.Y, |PV.Z|, 0.5,
-; R600-NEXT:     ADD T0.Z, T4.W, PV.X,
-; R600-NEXT:     BFI_INT T2.W, literal.x, 1.0, KC0[5].Y,
-; R600-NEXT:     CNDE * T3.W, T1.X, 0.0, T0.X, BS:VEC_021/SCL_122
+; R600-NEXT:     ADD T3.X, T0.Z, PV.W,
+; R600-NEXT:     ADD T0.Z, T6.W, PV.Z,
+; R600-NEXT:     BFI_INT T1.W, literal.x, PV.X, KC0[5].Z,
+; R600-NEXT:     SETGE * T2.W, |T1.Y|, 0.5,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
 ; R600-NEXT:     LSHR T1.X, KC0[2].Y, literal.x,
-; R600-NEXT:     ADD T0.Y, T1.W, PS,
-; R600-NEXT:     CNDE * T1.W, PV.Y, 0.0, PV.W,
-; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
-; R600-NEXT:     ADD T0.X, T3.X, PV.W,
+; R600-NEXT:     ADD T0.Y, T0.X, PV.W,
+; R600-NEXT:     BFI_INT * T1.W, literal.y, PS, KC0[5].Y,
+; R600-NEXT:    2(2.802597e-45), 2147483647(nan)
+; R600-NEXT:     ADD T0.X, T4.W, PV.W,
 ; R600-NEXT:     ADD_INT * T1.W, KC0[2].Y, literal.x,
 ; R600-NEXT:    16(2.242078e-44), 0(0.000000e+00)
 ; R600-NEXT:     LSHR * T2.X, PV.W, literal.x,
@@ -683,55 +687,55 @@ define amdgpu_kernel void @round_f16(ptr addrspace(1) %out, i32 %x.arg) #0 {
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; GFX6-NEXT:    v_trunc_f32_e32 v1, v0
+; GFX6-NEXT:    v_sub_f32_e32 v2, v0, v1
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v2|, 0.5
+; GFX6-NEXT:    v_cndmask_b32_e64 v2, 0, 1.0, s[2:3]
 ; GFX6-NEXT:    s_brev_b32 s2, -2
-; GFX6-NEXT:    v_trunc_f32_e32 v2, v0
-; GFX6-NEXT:    v_bfi_b32 v1, s2, 1.0, v0
-; GFX6-NEXT:    v_sub_f32_e32 v0, v0, v2
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; GFX6-NEXT:    v_add_f32_e32 v0, v2, v0
+; GFX6-NEXT:    v_bfi_b32 v0, s2, v2, v0
+; GFX6-NEXT:    v_add_f32_e32 v0, v1, v0
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: round_f16:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX8-NEXT:    s_load_dword s0, s[0:1], 0x2c
-; GFX8-NEXT:    s_movk_i32 s1, 0x7fff
+; GFX8-NEXT:    s_load_dword s4, s[0:1], 0x2c
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX8-NEXT:    v_mov_b32_e32 v0, 0x3c00
-; GFX8-NEXT:    s_mov_b32 s7, 0xf000
-; GFX8-NEXT:    s_mov_b32 s6, -1
+; GFX8-NEXT:    s_movk_i32 s5, 0x7fff
+; GFX8-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_mov_b32_e32 v1, s0
-; GFX8-NEXT:    v_bfi_b32 v0, s1, v0, v1
-; GFX8-NEXT:    v_trunc_f16_e32 v1, s0
-; GFX8-NEXT:    v_sub_f16_e32 v2, s0, v1
+; GFX8-NEXT:    v_trunc_f16_e32 v1, s4
+; GFX8-NEXT:    v_sub_f16_e32 v2, s4, v1
 ; GFX8-NEXT:    v_cmp_ge_f16_e64 vcc, |v2|, 0.5
 ; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v2, s4
+; GFX8-NEXT:    v_bfi_b32 v0, s5, v0, v2
+; GFX8-NEXT:    s_mov_b32 s2, -1
 ; GFX8-NEXT:    v_add_f16_e32 v0, v1, v0
-; GFX8-NEXT:    buffer_store_short v0, off, s[4:7], 0
+; GFX8-NEXT:    buffer_store_short v0, off, s[0:3], 0
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: round_f16:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3c00
+; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
 ; GFX9-NEXT:    s_mov_b32 s7, 0xf000
-; GFX9-NEXT:    s_mov_b32 s6, -1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v1
 ; GFX9-NEXT:    v_trunc_f16_e32 v1, s2
 ; GFX9-NEXT:    v_sub_f16_e32 v2, s2, v1
 ; GFX9-NEXT:    v_cmp_ge_f16_e64 vcc, |v2|, 0.5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v2, s2
+; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v2
+; GFX9-NEXT:    s_mov_b32 s6, -1
 ; GFX9-NEXT:    v_add_f16_e32 v0, v1, v0
 ; GFX9-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GFX9-NEXT:    s_endpgm
@@ -741,18 +745,16 @@ define amdgpu_kernel void @round_f16(ptr addrspace(1) %out, i32 %x.arg) #0 {
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b32 s2, s[0:1], 0x2c
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    s_movk_i32 s3, 0x3c00
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    v_trunc_f16_e32 v0, s2
-; GFX11-NEXT:    v_mov_b32_e32 v1, s2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_sub_f16_e32 v2, s2, v0
-; GFX11-NEXT:    v_bfi_b32 v1, 0x7fff, s3, v1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_sub_f16_e32 v1, s2, v0
+; GFX11-NEXT:    v_cmp_ge_f16_e64 s3, |v1|, 0.5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, 0, 0x3c00, s3
 ; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
+; GFX11-NEXT:    v_bfi_b32 v1, 0x7fff, v1, s2
 ; GFX11-NEXT:    s_mov_b32 s2, -1
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_cmp_ge_f16_e64 vcc_lo, |v2|, 0.5
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc_lo
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_f16_e32 v0, v0, v1
 ; GFX11-NEXT:    buffer_store_b16 v0, off, s[0:3], 0
@@ -762,7 +764,7 @@ define amdgpu_kernel void @round_f16(ptr addrspace(1) %out, i32 %x.arg) #0 {
 ;
 ; R600-LABEL: round_f16:
 ; R600:       ; %bb.0:
-; R600-NEXT:    ALU 19, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 17, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT MSKOR T0.XW, T1.X
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
@@ -770,12 +772,10 @@ define amdgpu_kernel void @round_f16(ptr addrspace(1) %out, i32 %x.arg) #0 {
 ; R600-NEXT:     FLT16_TO_FLT32 * T0.W, KC0[2].Z,
 ; R600-NEXT:     TRUNC * T1.W, PV.W,
 ; R600-NEXT:     ADD * T2.W, T0.W, -PV.W,
-; R600-NEXT:     BFI_INT T0.W, literal.x, 1.0, T0.W,
 ; R600-NEXT:     SETGE * T2.W, |PV.W|, 0.5,
-; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE T0.W, PS, 0.0, PV.W,
-; R600-NEXT:     AND_INT * T2.W, KC0[2].Y, literal.x,
-; R600-NEXT:    3(4.203895e-45), 0(0.000000e+00)
+; R600-NEXT:     BFI_INT T0.W, literal.x, PV.W, T0.W,
+; R600-NEXT:     AND_INT * T2.W, KC0[2].Y, literal.y,
+; R600-NEXT:    2147483647(nan), 3(4.203895e-45)
 ; R600-NEXT:     ADD * T0.W, T1.W, PV.W,
 ; R600-NEXT:     FLT32_TO_FLT16 T0.W, PV.W,
 ; R600-NEXT:     LSHL * T1.W, T2.W, literal.x,
@@ -799,29 +799,29 @@ define amdgpu_kernel void @round_v2f16(ptr addrspace(1) %out, i32 %in.arg) #0 {
 ; GFX6-LABEL: round_v2f16:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GFX6-NEXT:    s_brev_b32 s4, -2
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    s_lshr_b32 s3, s2, 16
+; GFX6-NEXT:    v_cvt_f32_f16_e32 v1, s3
 ; GFX6-NEXT:    v_cvt_f32_f16_e32 v0, s2
-; GFX6-NEXT:    s_lshr_b32 s2, s2, 16
-; GFX6-NEXT:    v_cvt_f32_f16_e32 v1, s2
-; GFX6-NEXT:    s_brev_b32 s2, -2
-; GFX6-NEXT:    v_trunc_f32_e32 v3, v0
-; GFX6-NEXT:    v_bfi_b32 v2, s2, 1.0, v0
-; GFX6-NEXT:    v_trunc_f32_e32 v4, v1
-; GFX6-NEXT:    v_bfi_b32 v5, s2, 1.0, v1
-; GFX6-NEXT:    v_sub_f32_e32 v1, v1, v4
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, 0.5
-; GFX6-NEXT:    v_sub_f32_e32 v0, v0, v3
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, 0, v5, vcc
-; GFX6-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, 0.5
-; GFX6-NEXT:    v_add_f32_e32 v1, v4, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX6-NEXT:    v_trunc_f32_e32 v3, v1
+; GFX6-NEXT:    v_sub_f32_e32 v5, v1, v3
+; GFX6-NEXT:    v_trunc_f32_e32 v2, v0
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v5|, 0.5
+; GFX6-NEXT:    v_sub_f32_e32 v4, v0, v2
+; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, 1.0, s[2:3]
+; GFX6-NEXT:    v_bfi_b32 v1, s4, v5, v1
+; GFX6-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v4|, 0.5
+; GFX6-NEXT:    v_add_f32_e32 v1, v3, v1
+; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, 1.0, s[2:3]
+; GFX6-NEXT:    v_bfi_b32 v0, s4, v3, v0
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; GFX6-NEXT:    v_add_f32_e32 v0, v3, v0
+; GFX6-NEXT:    v_add_f32_e32 v0, v2, v0
 ; GFX6-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX6-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
@@ -830,55 +830,55 @@ define amdgpu_kernel void @round_v2f16(ptr addrspace(1) %out, i32 %in.arg) #0 {
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dword s4, s[0:1], 0x2c
 ; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX8-NEXT:    s_movk_i32 s5, 0x7fff
-; GFX8-NEXT:    v_mov_b32_e32 v2, 0x3c00
+; GFX8-NEXT:    v_mov_b32_e32 v0, 0x3c00
+; GFX8-NEXT:    s_movk_i32 s6, 0x7fff
 ; GFX8-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_lshr_b32 s6, s4, 16
-; GFX8-NEXT:    v_trunc_f16_e32 v0, s6
-; GFX8-NEXT:    v_sub_f16_e32 v1, s6, v0
-; GFX8-NEXT:    v_mov_b32_e32 v3, s6
-; GFX8-NEXT:    v_bfi_b32 v3, s5, v2, v3
-; GFX8-NEXT:    v_cmp_ge_f16_e64 vcc, |v1|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v3, vcc
-; GFX8-NEXT:    v_add_f16_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT:    v_mov_b32_e32 v1, s4
-; GFX8-NEXT:    v_bfi_b32 v1, s5, v2, v1
+; GFX8-NEXT:    s_lshr_b32 s5, s4, 16
+; GFX8-NEXT:    v_trunc_f16_e32 v1, s5
+; GFX8-NEXT:    v_sub_f16_e32 v2, s5, v1
+; GFX8-NEXT:    v_cmp_ge_f16_e64 vcc, |v2|, 0.5
+; GFX8-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v3, s5
+; GFX8-NEXT:    v_bfi_b32 v2, s6, v2, v3
+; GFX8-NEXT:    v_add_f16_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX8-NEXT:    v_trunc_f16_e32 v2, s4
 ; GFX8-NEXT:    v_sub_f16_e32 v3, s4, v2
 ; GFX8-NEXT:    v_cmp_ge_f16_e64 vcc, |v3|, 0.5
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX8-NEXT:    v_add_f16_e32 v1, v2, v1
+; GFX8-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v3, s4
+; GFX8-NEXT:    v_bfi_b32 v0, s6, v0, v3
+; GFX8-NEXT:    v_add_f16_e32 v0, v2, v0
 ; GFX8-NEXT:    s_mov_b32 s2, -1
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: round_v2f16:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; GFX9-NEXT:    s_movk_i32 s0, 0x7fff
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3c00
+; GFX9-NEXT:    s_movk_i32 s1, 0x7fff
 ; GFX9-NEXT:    s_mov_b32 s7, 0xf000
-; GFX9-NEXT:    s_mov_b32 s6, -1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_lshr_b32 s0, s2, 16
+; GFX9-NEXT:    v_trunc_f16_e32 v1, s0
+; GFX9-NEXT:    v_sub_f16_e32 v2, s0, v1
+; GFX9-NEXT:    v_cmp_ge_f16_e64 vcc, |v2|, 0.5
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, 0, v0, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v3, s0
+; GFX9-NEXT:    v_bfi_b32 v2, s1, v2, v3
+; GFX9-NEXT:    v_add_f16_e32 v1, v1, v2
 ; GFX9-NEXT:    v_trunc_f16_e32 v2, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX9-NEXT:    v_sub_f16_e32 v3, s2, v2
-; GFX9-NEXT:    v_bfi_b32 v1, s0, v0, v1
-; GFX9-NEXT:    v_cmp_ge_f16_e64 vcc, |v3|, 0.5
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
-; GFX9-NEXT:    s_lshr_b32 s1, s2, 16
-; GFX9-NEXT:    v_add_f16_e32 v1, v2, v1
-; GFX9-NEXT:    v_trunc_f16_e32 v2, s1
-; GFX9-NEXT:    v_sub_f16_e32 v3, s1, v2
-; GFX9-NEXT:    v_mov_b32_e32 v4, s1
-; GFX9-NEXT:    v_bfi_b32 v0, s0, v0, v4
 ; GFX9-NEXT:    v_cmp_ge_f16_e64 vcc, |v3|, 0.5
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v3, s2
+; GFX9-NEXT:    v_bfi_b32 v0, s1, v0, v3
 ; GFX9-NEXT:    v_add_f16_e32 v0, v2, v0
-; GFX9-NEXT:    v_pack_b32_f16 v0, v1, v0
+; GFX9-NEXT:    s_mov_b32 s6, -1
+; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GFX9-NEXT:    s_endpgm
 ;
@@ -887,28 +887,29 @@ define amdgpu_kernel void @round_v2f16(ptr addrspace(1) %out, i32 %in.arg) #0 {
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b32 s2, s[0:1], 0x2c
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    s_movk_i32 s4, 0x3c00
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_trunc_f16_e32 v0, s2
 ; GFX11-NEXT:    s_lshr_b32 s3, s2, 16
-; GFX11-NEXT:    v_mov_b32_e32 v1, s2
-; GFX11-NEXT:    v_trunc_f16_e32 v2, s3
-; GFX11-NEXT:    v_mov_b32_e32 v3, s3
-; GFX11-NEXT:    v_sub_f16_e32 v4, s2, v0
-; GFX11-NEXT:    s_mov_b32 s2, -1
-; GFX11-NEXT:    v_bfi_b32 v1, 0x7fff, s4, v1
-; GFX11-NEXT:    v_sub_f16_e32 v5, s3, v2
-; GFX11-NEXT:    v_bfi_b32 v3, 0x7fff, s4, v3
-; GFX11-NEXT:    v_cmp_ge_f16_e64 vcc_lo, |v4|, 0.5
+; GFX11-NEXT:    v_trunc_f16_e32 v1, s2
+; GFX11-NEXT:    v_trunc_f16_e32 v0, s3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_sub_f16_e32 v3, s2, v1
+; GFX11-NEXT:    v_sub_f16_e32 v2, s3, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_cmp_ge_f16_e64 s4, |v2|, 0.5
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 0x3c00, s4
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cmp_ge_f16_e64 s4, |v3|, 0.5
+; GFX11-NEXT:    v_bfi_b32 v2, 0x7fff, v2, s3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 0x3c00, s4
 ; GFX11-NEXT:    s_mov_b32 s3, 0x31016000
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc_lo
-; GFX11-NEXT:    v_cmp_ge_f16_e64 vcc_lo, |v5|, 0.5
+; GFX11-NEXT:    v_add_f16_e32 v0, v0, v2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_f16_e32 v0, v0, v1
-; GFX11-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc_lo
-; GFX11-NEXT:    v_add_f16_e32 v1, v2, v3
+; GFX11-NEXT:    v_bfi_b32 v3, 0x7fff, v3, s2
+; GFX11-NEXT:    s_mov_b32 s2, -1
+; GFX11-NEXT:    v_add_f16_e32 v1, v1, v3
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_pack_b32_f16 v0, v0, v1
+; GFX11-NEXT:    v_pack_b32_f16 v0, v1, v0
 ; GFX11-NEXT:    buffer_store_b32 v0, off, s[0:3], 0
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -916,7 +917,7 @@ define amdgpu_kernel void @round_v2f16(ptr addrspace(1) %out, i32 %in.arg) #0 {
 ;
 ; R600-LABEL: round_v2f16:
 ; R600:       ; %bb.0:
-; R600-NEXT:    ALU 24, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    ALU 22, @4, KC0[CB0:0-32], KC1[]
 ; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
 ; R600-NEXT:    CF_END
 ; R600-NEXT:    PAD
@@ -928,20 +929,18 @@ define amdgpu_kernel void @round_v2f16(ptr addrspace(1) %out, i32 %in.arg) #0 {
 ; R600-NEXT:     TRUNC * T2.W, PV.W,
 ; R600-NEXT:     ADD T3.W, T0.W, -PS,
 ; R600-NEXT:     TRUNC * T4.W, PV.W,
-; R600-NEXT:     ADD T0.Z, T1.W, -PS,
-; R600-NEXT:     BFI_INT T0.W, literal.x, 1.0, T0.W,
+; R600-NEXT:     ADD T5.W, T1.W, -PS,
+; R600-NEXT:     SETGE * T3.W, |PV.W|, 0.5,
+; R600-NEXT:     BFI_INT T0.W, literal.x, PS, T0.W,
 ; R600-NEXT:     SETGE * T3.W, |PV.W|, 0.5,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE T1.Z, PS, 0.0, PV.W,
-; R600-NEXT:     BFI_INT T0.W, literal.x, 1.0, T1.W,
-; R600-NEXT:     SETGE * T1.W, |PV.Z|, 0.5,
+; R600-NEXT:     BFI_INT T1.W, literal.x, PS, T1.W, BS:VEC_021/SCL_122
+; R600-NEXT:     ADD * T0.W, T2.W, PV.W,
 ; R600-NEXT:    2147483647(nan), 0(0.000000e+00)
-; R600-NEXT:     CNDE T0.W, PS, 0.0, PV.W,
-; R600-NEXT:     ADD * T1.W, T2.W, PV.Z,
-; R600-NEXT:     FLT32_TO_FLT16 T1.W, PS,
-; R600-NEXT:     ADD * T0.W, T4.W, PV.W,
 ; R600-NEXT:     FLT32_TO_FLT16 T0.W, PS,
-; R600-NEXT:     LSHL * T1.W, PV.W, literal.x,
+; R600-NEXT:     ADD * T1.W, T4.W, PV.W,
+; R600-NEXT:     FLT32_TO_FLT16 T1.W, PS,
+; R600-NEXT:     LSHL * T0.W, PV.W, literal.x,
 ; R600-NEXT:    16(2.242078e-44), 0(0.000000e+00)
 ; R600-NEXT:     OR_INT T0.X, PV.W, PS,
 ; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,


