[llvm] [AMDGPU][SDAG] Legalise v2i32 or/xor/and instructions to make use of 64-bit wide instructions (PR #140694)

Chris Jackson via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 6 05:48:02 PDT 2025


https://github.com/chrisjbris updated https://github.com/llvm/llvm-project/pull/140694

>From 995e7fb4b9cfdf4735729f6be44ad0b8e7f5aedb Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Tue, 20 May 2025 05:14:36 -0500
Subject: [PATCH 1/7] [AMDGPU][SDAG] Legalise v2i32 or/xor/and instructions to
 make use of 64-bit wide instructions

Make use of s_or_b64/s_and_b64/s_xor_b64 for v2i32. Legalising these
operations causes a number of test regressions, so extra work in the
combiner and TableGen patterns was necessary.

- Use Custom lowering for v2i32 rotr instead of additional patterns.
Modify performOrCombine() to remove some identity OR operations.

- Fix the rotr regression by adding lowerROTR() on the legalizer codepath

- Add test case to rotr.ll
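
As a minimal illustration (not part of the patch; the function and value
names are made up), a uniform v2i32 bitwise op such as the one below is
expected to select to a single s_or_b64 rather than two s_or_b32 once the
type is legal, while the divergent case still splits into two v_or_b32:

  define amdgpu_kernel void @or_v2i32_uniform(ptr addrspace(1) %out,
                                              <2 x i32> %a, <2 x i32> %b) {
    %r = or <2 x i32> %a, %b    ; uniform: expected to select to s_or_b64
    store <2 x i32> %r, ptr addrspace(1) %out
    ret void
  }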
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     | 105 +++++++++-
 llvm/lib/Target/AMDGPU/SIISelLowering.h       |   4 +
 llvm/lib/Target/AMDGPU/SIInstructions.td      |  17 +-
 llvm/lib/Target/AMDGPU/SOPInstructions.td     |  15 ++
 llvm/lib/Target/AMDGPU/VOP2Instructions.td    |   8 +-
 llvm/test/CodeGen/AMDGPU/and.ll               |   3 +-
 llvm/test/CodeGen/AMDGPU/bf16-conversions.ll  |  24 +--
 llvm/test/CodeGen/AMDGPU/bfi_int.ll           |   4 +-
 .../AMDGPU/copysign-simplify-demanded-bits.ll |   2 +-
 .../AMDGPU/dag-preserve-disjoint-flag.ll      |  36 +++-
 .../CodeGen/AMDGPU/fneg-modifier-casting.ll   |   2 +-
 llvm/test/CodeGen/AMDGPU/fshr.ll              | 188 +++++-------------
 llvm/test/CodeGen/AMDGPU/or.ll                |   4 +-
 llvm/test/CodeGen/AMDGPU/rotr.ll              | 128 ++++++++++++
 .../CodeGen/AMDGPU/vector_range_metadata.ll   |   8 +-
 llvm/test/CodeGen/AMDGPU/xor.ll               |   4 +-
 16 files changed, 376 insertions(+), 176 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ba7e11a853347..083a62638ee09 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -430,6 +430,14 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2i32, MVT::v2f32}, Legal);
   }
 
+  setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, MVT::v2i32, Legal);
+  // Prevent SELECT v2i32 from being implemented with the above bitwise ops
+  // and instead lower it to cndmask in SITargetLowering::LowerSELECT().
+  setOperationAction(ISD::SELECT, MVT::v2i32, Custom);
+  // Enable MatchRotate to produce ISD::ROTR, which is later transformed to
+  // alignbit.
+  setOperationAction(ISD::ROTR, MVT::v2i32, Custom);
+
   setOperationAction(ISD::BUILD_VECTOR, {MVT::v4f16, MVT::v4i16, MVT::v4bf16},
                      Custom);
 
@@ -5929,6 +5937,20 @@ SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
   return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op), VT, OpLo, OpHi);
 }
 
+// Enable lowering of ROTR for vXi32 types. This is a workaround for a
+// regression caused by legalising v2i32 or.
+SDValue SITargetLowering::lowerROTR(SDValue Op, SelectionDAG &DAG) const {
+  unsigned Opc = Op.getOpcode();
+  EVT VT = Op.getValueType();
+  assert(Opc == ISD::ROTR && "Expected ROTR Opcode for lowerROTR.");
+
+  assert((VT == MVT::v2i32 || VT == MVT::v4i32 || VT == MVT::v8i32 ||
+          VT == MVT::v16i32) &&
+         "Unexpected ValueType.");
+
+  return DAG.UnrollVectorOp(Op.getNode());
+}
+
 // Work around LegalizeDAG doing the wrong thing and fully scalarizing if the
 // wider vector type is legal.
 SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
@@ -6115,6 +6137,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return lowerGET_FPENV(Op, DAG);
   case ISD::SET_FPENV:
     return lowerSET_FPENV(Op, DAG);
+  case ISD::ROTR:
+    return lowerROTR(Op, DAG);
   }
   return SDValue();
 }
@@ -12872,6 +12896,53 @@ SDValue SITargetLowering::performOrCombine(SDNode *N,
     }
   }
 
+  // Detect an identity v2i32 OR and replace it with the identity source node.
+  // Specifically, an OR whose operands are constructed from the same source
+  // node via extract_vector_elt and build_vector, i.e.
+  // v2i32 or(
+  //   v2i32 build_vector(
+  //     i32 extract_elt(%IdentitySrc, 0),
+  //     i32 0
+  //   ),
+  //   v2i32 build_vector(
+  //     i32 0,
+  //     i32 extract_elt(%IdentitySrc, 1)
+  //   )
+  // )
+  // =>
+  // v2i32 %IdentitySrc
+  if (VT == MVT::v2i32) {
+    if (LHS->getOpcode() == ISD::BUILD_VECTOR &&
+        RHS->getOpcode() == ISD::BUILD_VECTOR) {
+      LLVM_DEBUG(dbgs() << "### Performing v2i32 SIISelLowering "
+                           "DAGCombine::CombineOR\n";);
+
+      if (auto *LC = dyn_cast<ConstantSDNode>(LHS->getOperand(1)))
+        if (auto *RC = dyn_cast<ConstantSDNode>(RHS->getOperand(0))) {
+
+          // Check that the other build_vector elements are zero constants.
+          if (LC->getZExtValue() == 0 && RC->getZExtValue() == 0) {
+
+            // Get the extract_vector_elt operands.
+            SDValue LEVE = LHS->getOperand(0);
+            SDValue REVE = RHS->getOperand(1);
+
+            if (LEVE->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+                REVE->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
+              // Check that different elements from the same vector are
+              // extracted.
+              if (LEVE->getOperand(0) == REVE->getOperand(0) &&
+                  LEVE->getOperand(1) != REVE->getOperand(1)) {
+                LLVM_DEBUG(dbgs() << "### Found identity OR, folding...\n";);
+                SDValue IdentitySrc = LEVE.getOperand(0);
+                return IdentitySrc;
+              }
+            }
+          }
+        }
+    }
+  }
+
   if (VT != MVT::i64 || DCI.isBeforeLegalizeOps())
     return SDValue();
 
@@ -12916,13 +12987,43 @@ SDValue SITargetLowering::performXorCombine(SDNode *N,
   if (SDValue RV = reassociateScalarOps(N, DCI.DAG))
     return RV;
 
+  SelectionDAG &DAG = DCI.DAG;
+  EVT VT = N->getValueType(0);
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
 
+  if (VT == MVT::v2i32 && LHS.getNumOperands() > 1) {
+
+    const ConstantSDNode *CRHS_0 = dyn_cast<ConstantSDNode>(RHS.getOperand(0));
+    const ConstantSDNode *CRHS_1 = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
+    SDValue LHS_0 = LHS.getOperand(0);
+    SDValue LHS_1 = LHS.getOperand(1);
+
+    if (LHS.getOpcode() == ISD::VSELECT && VT == MVT::v2i32) {
+      if (CRHS_0 && CRHS_0->getAPIntValue().isSignMask() &&
+          shouldFoldFNegIntoSrc(N, LHS_0))
+        if (CRHS_1 && CRHS_1->getAPIntValue().isSignMask() &&
+            shouldFoldFNegIntoSrc(N, LHS_1)) {
+          SDLoc DL(N);
+          SDValue CastLHS =
+              DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, LHS->getOperand(1));
+          SDValue CastRHS =
+              DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, LHS->getOperand(2));
+          SDValue FNegLHS = DAG.getNode(ISD::FNEG, DL, MVT::v2f32, CastLHS);
+          SDValue FNegRHS = DAG.getNode(ISD::FNEG, DL, MVT::v2f32, CastRHS);
+          SDValue NewSelect = DAG.getNode(ISD::VSELECT, DL, MVT::v2f32,
+                                          LHS->getOperand(0), FNegLHS, FNegRHS);
+          return DAG.getNode(ISD::BITCAST, DL, VT, NewSelect);
+        }
+    }
+    // TODO: Possibly split the vector here if one side has a constant RHS.
+  }
+
+  // TODO: Add a test for when only one of the RHS vector elements is a
+  // constant; it might be possible to optimise this case.
+
   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
-  SelectionDAG &DAG = DCI.DAG;
 
-  EVT VT = N->getValueType(0);
   if (CRHS && VT == MVT::i64) {
     if (SDValue Split =
             splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index c42366a1c04c8..a1fdae384502a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -366,6 +366,9 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;
 
+  // bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
+  //                                           EVT VT) const override;
+
   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                unsigned Index) const override;
   bool isExtractVecEltCheap(EVT VT, unsigned Index) const override;
@@ -437,6 +440,7 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerGET_FPENV(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerSET_FPENV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerROTR(SDValue Op, SelectionDAG &DAG) const;
 
   Register getRegisterByName(const char* RegName, LLT VT,
                              const MachineFunction &MF) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 2e2913d88cc54..dd053703be74e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2334,9 +2334,9 @@ def : AMDGPUPatIgnoreCopies <
                 (COPY_TO_REGCLASS VSrc_b32:$z, VGPR_32))
 >;
 
-// 64-bit version
+foreach vt = [i64, v2i32] in {
 def : AMDGPUPatIgnoreCopies <
-  (DivergentBinFrag<xor> i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
+  (DivergentBinFrag<xor> vt:$z, (and vt:$x, (xor vt:$y, vt:$z))),
   (REG_SEQUENCE VReg_64,
     (V_BFI_B32_e64 (i32 (EXTRACT_SUBREG VReg_64:$x, sub0)),
               (i32 (EXTRACT_SUBREG VReg_64:$y, sub0)),
@@ -2345,6 +2345,7 @@ def : AMDGPUPatIgnoreCopies <
               (i32 (EXTRACT_SUBREG VReg_64:$y, sub1)),
               (i32 (EXTRACT_SUBREG VReg_64:$z, sub1))), sub1)
 >;
+}
 
 def : AMDGPUPat <
   (fcopysign f32:$src0, f32:$src1),
@@ -2375,9 +2376,14 @@ def : AMDGPUPat <
                $src1), sub1)
 >;
 
+def : AMDGPUPat <
+  (fneg (select i1:$src0, (f32 (bitconvert i32:$src1)), (f32 (bitconvert i32:$src2)))),
+    (V_CNDMASK_B32_e64 (i32 1), $src2, (i32 1), $src1, $src0)>;
+
 let True16Predicate = NotHasTrue16BitInsts in {
 def : ROTRPattern <V_ALIGNBIT_B32_e64>;
 
+
 def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))),
           (V_ALIGNBIT_B32_e64 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)),
                           (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>;
@@ -2388,6 +2394,12 @@ def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))),
 } // end True16Predicate = NotHasTrue16BitInsts
 
 let True16Predicate = UseRealTrue16Insts in {
+
+// Prevents regression in fneg-modifier-casting.ll along with modifications to XorCombine() when v2i32 or is legal.
+def : AMDGPUPat <
+  (fneg (select i1:$src0, (f32 (bitconvert i32:$src1)), (f32 (bitconvert i32:$src2)))),
+    (V_CNDMASK_B32_e64 (i32 1), $src2, (i32 1), $src1, $src0)>;
+
 def : GCNPat <
   (rotr i32:$src0, i32:$src1),
   (V_ALIGNBIT_B32_t16_e64 /* src0_modifiers */ 0, $src0,
@@ -2449,6 +2461,7 @@ def : GCNPat<(fshr i32:$src0, i32:$src1, i32:$src2),
 >;
 } // end True16Predicate = UseFakeTrue16Insts
 
+
 /********** ====================== **********/
 /**********   Indirect addressing  **********/
 /********** ====================== **********/
diff --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 40b3dfb94ce2f..f2e1a27644afb 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -1779,6 +1779,21 @@ def : GCNPat <
   (S_MOV_B32 imm:$imm)
 >;
 
+def : GCNPat <
+  (v2i32 (UniformBinFrag<and> v2i32:$x, v2i32:$y)),
+  (S_AND_B64 SReg_64:$x, SReg_64:$y)
+>;
+
+def : GCNPat <
+  (v2i32 (UniformBinFrag<or> v2i32:$x, v2i32:$y)),
+  (S_OR_B64 SReg_64:$x, SReg_64:$y)
+>;
+
+def : GCNPat <
+  (v2i32 (UniformBinFrag<xor> v2i32:$x, v2i32:$y)),
+  (S_XOR_B64 SReg_64:$x, SReg_64:$y)
+>;
+
 // Same as a 32-bit inreg
 def : GCNPat<
   (i32 (UniformUnaryFrag<sext> i16:$src)),
diff --git a/llvm/lib/Target/AMDGPU/VOP2Instructions.td b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
index 0c7e20fc1ebf3..efa9c465f794e 100644
--- a/llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -954,9 +954,9 @@ def : DivergentClampingBinOp<sub, V_SUB_CO_U32_e64>;
 def : DivergentBinOp<adde, V_ADDC_U32_e32>;
 def : DivergentBinOp<sube, V_SUBB_U32_e32>;
 
-class divergent_i64_BinOp <SDPatternOperator Op, Instruction Inst> :
+class divergent_i64_BinOp <SDPatternOperator Op, Instruction Inst, ValueType vt = i64> :
   GCNPat<
-      (DivergentBinFrag<Op> i64:$src0, i64:$src1),
+      (DivergentBinFrag<Op> vt:$src0, vt:$src1),
       (REG_SEQUENCE VReg_64,
         (Inst
           (i32 (EXTRACT_SUBREG $src0, sub0)),
@@ -973,6 +973,10 @@ def :  divergent_i64_BinOp <and, V_AND_B32_e64>;
 def :  divergent_i64_BinOp <or,  V_OR_B32_e64>;
 def :  divergent_i64_BinOp <xor, V_XOR_B32_e64>;
 
+def :  divergent_i64_BinOp <and, V_AND_B32_e64, v2i32>;
+def :  divergent_i64_BinOp <or,  V_OR_B32_e64, v2i32>;
+def :  divergent_i64_BinOp <xor, V_XOR_B32_e64, v2i32>;
+
 // mul24 w/ 64 bit output.
 class mul24_64_Pat<SDPatternOperator Op, Instruction InstLo, Instruction InstHi> : GCNPat<
   (i64 (Op i32:$src0, i32:$src1)),
diff --git a/llvm/test/CodeGen/AMDGPU/and.ll b/llvm/test/CodeGen/AMDGPU/and.ll
index c6233642110ea..05402b3c89409 100644
--- a/llvm/test/CodeGen/AMDGPU/and.ll
+++ b/llvm/test/CodeGen/AMDGPU/and.ll
@@ -8,8 +8,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() #0
 ; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
-; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
-; SI: s_and_b32 s{{[0-9]+, s[0-9]+, s[0-9]+}}
+; SI: s_and_b64
 
 define amdgpu_kernel void @test2(ptr addrspace(1) %out, ptr addrspace(1) %in) {
   %b_ptr = getelementptr <2 x i32>, ptr addrspace(1) %in, i32 1
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
index a597faa028f22..ca8f7736f6093 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-conversions.ll
@@ -151,25 +151,25 @@ define amdgpu_ps float @v_test_cvt_v2f64_v2bf16_v(<2 x double> %src) {
 ; GFX-950-LABEL: v_test_cvt_v2f64_v2bf16_v:
 ; GFX-950:       ; %bb.0:
 ; GFX-950-NEXT:    v_cvt_f32_f64_e32 v6, v[2:3]
+; GFX-950-NEXT:    v_and_b32_e32 v4, 1, v6
+; GFX-950-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
 ; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[4:5], v6
-; GFX-950-NEXT:    v_and_b32_e32 v7, 1, v6
 ; GFX-950-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[2:3]|, |v[4:5]|
-; GFX-950-NEXT:    v_cmp_nlg_f64_e32 vcc, v[2:3], v[4:5]
-; GFX-950-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v7
+; GFX-950-NEXT:    v_cmp_nlg_f64_e64 s[0:1], v[2:3], v[4:5]
+; GFX-950-NEXT:    v_cvt_f32_f64_e32 v7, v[0:1]
 ; GFX-950-NEXT:    v_cndmask_b32_e64 v2, -1, 1, s[2:3]
 ; GFX-950-NEXT:    v_add_u32_e32 v2, v6, v2
-; GFX-950-NEXT:    s_or_b64 vcc, vcc, s[0:1]
-; GFX-950-NEXT:    v_cvt_f32_f64_e32 v5, v[0:1]
+; GFX-950-NEXT:    s_or_b64 vcc, s[0:1], vcc
 ; GFX-950-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
-; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[2:3], v5
-; GFX-950-NEXT:    v_and_b32_e32 v6, 1, v5
+; GFX-950-NEXT:    v_cvt_f64_f32_e32 v[2:3], v7
+; GFX-950-NEXT:    v_and_b32_e32 v8, 1, v7
 ; GFX-950-NEXT:    v_cmp_gt_f64_e64 s[2:3], |v[0:1]|, |v[2:3]|
-; GFX-950-NEXT:    v_cmp_nlg_f64_e32 vcc, v[0:1], v[2:3]
-; GFX-950-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v6
+; GFX-950-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX-950-NEXT:    v_cmp_nlg_f64_e64 s[0:1], v[0:1], v[2:3]
 ; GFX-950-NEXT:    v_cndmask_b32_e64 v0, -1, 1, s[2:3]
-; GFX-950-NEXT:    v_add_u32_e32 v0, v5, v0
-; GFX-950-NEXT:    s_or_b64 vcc, vcc, s[0:1]
-; GFX-950-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
+; GFX-950-NEXT:    v_add_u32_e32 v0, v7, v0
+; GFX-950-NEXT:    s_or_b64 vcc, s[0:1], vcc
+; GFX-950-NEXT:    v_cndmask_b32_e32 v0, v0, v7, vcc
 ; GFX-950-NEXT:    v_cvt_pk_bf16_f32 v0, v0, v4
 ; GFX-950-NEXT:    ; return to shader part epilog
   %res = fptrunc <2 x double> %src to <2 x bfloat>
diff --git a/llvm/test/CodeGen/AMDGPU/bfi_int.ll b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
index 201b97d479c68..d76ecbd73fe6e 100644
--- a/llvm/test/CodeGen/AMDGPU/bfi_int.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
@@ -582,15 +582,15 @@ define <2 x i32> @v_bitselect_v2i32_pat1(<2 x i32> %a, <2 x i32> %b, <2 x i32> %
 ; GFX7-LABEL: v_bitselect_v2i32_pat1:
 ; GFX7:       ; %bb.0:
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX7-NEXT:    v_bfi_b32 v1, v3, v1, v5
+; GFX7-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_bitselect_v2i32_pat1:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX8-NEXT:    v_bfi_b32 v1, v3, v1, v5
+; GFX8-NEXT:    v_bfi_b32 v0, v2, v0, v4
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_bitselect_v2i32_pat1:
diff --git a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
index a01c2fa152ab3..2d73f17d74d8b 100644
--- a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
@@ -31,8 +31,8 @@ define <2 x half> @test_pown_reduced_fast_v2f16_known_odd(<2 x half> %x, <2 x i3
 ; GFX9-LABEL: test_pown_reduced_fast_v2f16_known_odd:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_or_b32_e32 v1, 1, v1
 ; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_or_b32_e32 v1, 1, v1
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, v2
 ; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff7fff, v0
diff --git a/llvm/test/CodeGen/AMDGPU/dag-preserve-disjoint-flag.ll b/llvm/test/CodeGen/AMDGPU/dag-preserve-disjoint-flag.ll
index d63a36c4b2958..7e2e8b577e085 100644
--- a/llvm/test/CodeGen/AMDGPU/dag-preserve-disjoint-flag.ll
+++ b/llvm/test/CodeGen/AMDGPU/dag-preserve-disjoint-flag.ll
@@ -28,12 +28,15 @@ define amdgpu_ps <2 x i32> @s_or_v2i32_disjoint(<2 x i32> inreg %a, <2 x i32> in
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr2
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr1
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr0
-  ; CHECK-NEXT:   [[S_OR_B32_:%[0-9]+]]:sreg_32 = disjoint S_OR_B32 [[COPY2]], [[COPY]], implicit-def dead $scc
-  ; CHECK-NEXT:   [[S_OR_B32_1:%[0-9]+]]:sreg_32 = disjoint S_OR_B32 [[COPY3]], [[COPY1]], implicit-def dead $scc
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[S_OR_B32_1]]
-  ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY4]], implicit $exec
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[S_OR_B32_]]
-  ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY5]], implicit $exec
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+  ; CHECK-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:sgpr_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+  ; CHECK-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64 = disjoint S_OR_B64 killed [[REG_SEQUENCE1]], killed [[REG_SEQUENCE]], implicit-def dead $scc
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub0
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY5]], implicit $exec
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sreg_32 = COPY [[S_OR_B64_]].sub1
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 killed [[COPY7]], implicit $exec
   ; CHECK-NEXT:   $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
   ; CHECK-NEXT:   $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
   ; CHECK-NEXT:   SI_RETURN_TO_EPILOG $sgpr0, $sgpr1
@@ -64,10 +67,23 @@ define <2 x i32> @v_or_v2i32_disjoint(<2 x i32> %a, <2 x i32> %b) {
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; CHECK-NEXT:   [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = disjoint V_OR_B32_e64 [[COPY3]], [[COPY1]], implicit $exec
-  ; CHECK-NEXT:   [[V_OR_B32_e64_1:%[0-9]+]]:vgpr_32 = disjoint V_OR_B32_e64 [[COPY2]], [[COPY]], implicit $exec
-  ; CHECK-NEXT:   $vgpr0 = COPY [[V_OR_B32_e64_]]
-  ; CHECK-NEXT:   $vgpr1 = COPY [[V_OR_B32_e64_1]]
+  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
+  ; CHECK-NEXT:   [[DEF2:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[DEF3:%[0-9]+]]:sgpr_32 = IMPLICIT_DEF
+  ; CHECK-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY]], %subreg.sub1
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub1
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub1
+  ; CHECK-NEXT:   [[V_OR_B32_e64_:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 killed [[COPY5]], killed [[COPY4]], implicit $exec
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE]].sub0
+  ; CHECK-NEXT:   [[V_OR_B32_e64_1:%[0-9]+]]:vgpr_32 = V_OR_B32_e64 killed [[COPY7]], killed [[COPY6]], implicit $exec
+  ; CHECK-NEXT:   [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE killed [[V_OR_B32_e64_1]], %subreg.sub0, killed [[V_OR_B32_e64_]], %subreg.sub1
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE2]].sub0
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE2]].sub1
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY8]]
+  ; CHECK-NEXT:   $vgpr1 = COPY [[COPY9]]
   ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %result = or disjoint <2 x i32> %a, %b
   ret <2 x i32> %result
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 924378eb2376d..b6f5cad48883d 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -1759,4 +1759,4 @@ bb5:                                              ; preds = %bb, %.entry
 
 declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32 immarg) #0
 
-attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
\ No newline at end of file
diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
index 4a79096442c96..7afd99ddb0ef6 100644
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -2010,61 +2010,61 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %src0, <2 x i24> %src1, <2 x i24> %src2
 ; SI-LABEL: v_fshr_v2i24:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
+; SI-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; SI-NEXT:    s_mov_b32 s4, 0xaaaaaab
-; SI-NEXT:    v_mul_hi_u32 v6, v6, s4
-; SI-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
+; SI-NEXT:    v_mul_hi_u32 v6, v4, s4
+; SI-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
 ; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
 ; SI-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
 ; SI-NEXT:    v_sub_i32_e32 v4, vcc, v4, v6
-; SI-NEXT:    v_mul_hi_u32 v6, v7, s4
+; SI-NEXT:    v_mul_hi_u32 v6, v5, s4
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, 8, v4
 ; SI-NEXT:    v_alignbit_b32 v0, v0, v2, v4
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v3
-; SI-NEXT:    v_mul_u32_u24_e32 v3, 24, v6
-; SI-NEXT:    v_sub_i32_e32 v3, vcc, v5, v3
-; SI-NEXT:    v_add_i32_e32 v3, vcc, 8, v3
-; SI-NEXT:    v_alignbit_b32 v1, v1, v2, v3
+; SI-NEXT:    v_mul_u32_u24_e32 v2, 24, v6
+; SI-NEXT:    v_sub_i32_e32 v2, vcc, v5, v2
+; SI-NEXT:    v_add_i32_e32 v2, vcc, 8, v2
+; SI-NEXT:    v_alignbit_b32 v1, v1, v3, v2
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_fshr_v2i24:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
+; VI-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; VI-NEXT:    s_mov_b32 s4, 0xaaaaaab
-; VI-NEXT:    v_mul_hi_u32 v6, v6, s4
-; VI-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
+; VI-NEXT:    v_mul_hi_u32 v6, v4, s4
+; VI-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
 ; VI-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
 ; VI-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
 ; VI-NEXT:    v_sub_u32_e32 v4, vcc, v4, v6
-; VI-NEXT:    v_mul_hi_u32 v6, v7, s4
+; VI-NEXT:    v_mul_hi_u32 v6, v5, s4
 ; VI-NEXT:    v_add_u32_e32 v4, vcc, 8, v4
 ; VI-NEXT:    v_alignbit_b32 v0, v0, v2, v4
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 8, v3
-; VI-NEXT:    v_mul_u32_u24_e32 v3, 24, v6
-; VI-NEXT:    v_sub_u32_e32 v3, vcc, v5, v3
-; VI-NEXT:    v_add_u32_e32 v3, vcc, 8, v3
-; VI-NEXT:    v_alignbit_b32 v1, v1, v2, v3
+; VI-NEXT:    v_mul_u32_u24_e32 v2, 24, v6
+; VI-NEXT:    v_sub_u32_e32 v2, vcc, v5, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, 8, v2
+; VI-NEXT:    v_alignbit_b32 v1, v1, v3, v2
 ; VI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_fshr_v2i24:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; GFX9-NEXT:    s_mov_b32 s4, 0xaaaaaab
-; GFX9-NEXT:    v_mul_hi_u32 v6, v6, s4
-; GFX9-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
+; GFX9-NEXT:    v_mul_hi_u32 v6, v4, s4
+; GFX9-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; GFX9-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
 ; GFX9-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
 ; GFX9-NEXT:    v_sub_u32_e32 v4, v4, v6
-; GFX9-NEXT:    v_mul_hi_u32 v6, v7, s4
+; GFX9-NEXT:    v_mul_hi_u32 v6, v5, s4
 ; GFX9-NEXT:    v_add_u32_e32 v4, 8, v4
 ; GFX9-NEXT:    v_alignbit_b32 v0, v0, v2, v4
-; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 8, v3
-; GFX9-NEXT:    v_mul_u32_u24_e32 v3, 24, v6
-; GFX9-NEXT:    v_sub_u32_e32 v3, v5, v3
-; GFX9-NEXT:    v_add_u32_e32 v3, 8, v3
-; GFX9-NEXT:    v_alignbit_b32 v1, v1, v2, v3
+; GFX9-NEXT:    v_mul_u32_u24_e32 v2, 24, v6
+; GFX9-NEXT:    v_sub_u32_e32 v2, v5, v2
+; GFX9-NEXT:    v_add_u32_e32 v2, 8, v2
+; GFX9-NEXT:    v_alignbit_b32 v1, v1, v3, v2
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; R600-LABEL: v_fshr_v2i24:
@@ -2075,12 +2075,12 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %src0, <2 x i24> %src1, <2 x i24> %src2
 ; GFX10-LABEL: v_fshr_v2i24:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
-; GFX10-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
+; GFX10-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
+; GFX10-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX10-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v6
-; GFX10-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v7
+; GFX10-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v4
+; GFX10-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v5
 ; GFX10-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
 ; GFX10-NEXT:    v_mul_u32_u24_e32 v7, 24, v7
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v4, v4, v6
@@ -2091,109 +2091,29 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %src0, <2 x i24> %src1, <2 x i24> %src2
 ; GFX10-NEXT:    v_alignbit_b32 v1, v1, v3, v5
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX11-TRUE16-LABEL: v_fshr_v2i24:
-; GFX11-TRUE16:       ; %bb.0:
-; GFX11-TRUE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
-; GFX11-TRUE16-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
-; GFX11-TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v6
-; GFX11-TRUE16-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v7
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
-; GFX11-TRUE16-NEXT:    v_mul_u32_u24_e32 v7, 24, v7
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v4, v4, v6
-; GFX11-TRUE16-NEXT:    v_sub_nc_u32_e32 v5, v5, v7
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v4, 8, v4
-; GFX11-TRUE16-NEXT:    v_add_nc_u32_e32 v5, 8, v5
-; GFX11-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT:    v_alignbit_b32 v0, v0, v2, v4.l
-; GFX11-TRUE16-NEXT:    v_alignbit_b32 v1, v1, v3, v5.l
-; GFX11-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-FAKE16-LABEL: v_fshr_v2i24:
-; GFX11-FAKE16:       ; %bb.0:
-; GFX11-FAKE16-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
-; GFX11-FAKE16-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
-; GFX11-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-FAKE16-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v6
-; GFX11-FAKE16-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
-; GFX11-FAKE16-NEXT:    v_mul_u32_u24_e32 v7, 24, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v4, v4, v6
-; GFX11-FAKE16-NEXT:    v_sub_nc_u32_e32 v5, v5, v7
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v4, 8, v4
-; GFX11-FAKE16-NEXT:    v_add_nc_u32_e32 v5, 8, v5
-; GFX11-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-FAKE16-NEXT:    v_alignbit_b32 v0, v0, v2, v4
-; GFX11-FAKE16-NEXT:    v_alignbit_b32 v1, v1, v3, v5
-; GFX11-FAKE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX12-TRUE16-LABEL: v_fshr_v2i24:
-; GFX12-TRUE16:       ; %bb.0:
-; GFX12-TRUE16-NEXT:    s_wait_loadcnt_dscnt 0x0
-; GFX12-TRUE16-NEXT:    s_wait_expcnt 0x0
-; GFX12-TRUE16-NEXT:    s_wait_samplecnt 0x0
-; GFX12-TRUE16-NEXT:    s_wait_bvhcnt 0x0
-; GFX12-TRUE16-NEXT:    s_wait_kmcnt 0x0
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
-; GFX12-TRUE16-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
-; GFX12-TRUE16-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-TRUE16-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v6
-; GFX12-TRUE16-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v7
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
-; GFX12-TRUE16-NEXT:    v_mul_u32_u24_e32 v7, 24, v7
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT:    v_sub_nc_u32_e32 v4, v4, v6
-; GFX12-TRUE16-NEXT:    v_sub_nc_u32_e32 v5, v5, v7
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT:    v_add_nc_u32_e32 v4, 8, v4
-; GFX12-TRUE16-NEXT:    v_add_nc_u32_e32 v5, 8, v5
-; GFX12-TRUE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT:    v_alignbit_b32 v0, v0, v2, v4.l
-; GFX12-TRUE16-NEXT:    v_alignbit_b32 v1, v1, v3, v5.l
-; GFX12-TRUE16-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX12-FAKE16-LABEL: v_fshr_v2i24:
-; GFX12-FAKE16:       ; %bb.0:
-; GFX12-FAKE16-NEXT:    s_wait_loadcnt_dscnt 0x0
-; GFX12-FAKE16-NEXT:    s_wait_expcnt 0x0
-; GFX12-FAKE16-NEXT:    s_wait_samplecnt 0x0
-; GFX12-FAKE16-NEXT:    s_wait_bvhcnt 0x0
-; GFX12-FAKE16-NEXT:    s_wait_kmcnt 0x0
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v6, 0xffffff, v4
-; GFX12-FAKE16-NEXT:    v_and_b32_e32 v7, 0xffffff, v5
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
-; GFX12-FAKE16-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX12-FAKE16-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v6
-; GFX12-FAKE16-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v7
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
-; GFX12-FAKE16-NEXT:    v_mul_u32_u24_e32 v7, 24, v7
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_sub_nc_u32_e32 v4, v4, v6
-; GFX12-FAKE16-NEXT:    v_sub_nc_u32_e32 v5, v5, v7
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_add_nc_u32_e32 v4, 8, v4
-; GFX12-FAKE16-NEXT:    v_add_nc_u32_e32 v5, 8, v5
-; GFX12-FAKE16-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-FAKE16-NEXT:    v_alignbit_b32 v0, v0, v2, v4
-; GFX12-FAKE16-NEXT:    v_alignbit_b32 v1, v1, v3, v5
-; GFX12-FAKE16-NEXT:    s_setpc_b64 s[30:31]
+; GFX11-LABEL: v_fshr_v2i24:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
+; GFX11-NEXT:    v_and_b32_e32 v5, 0xffffff, v5
+; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 8, v2
+; GFX11-NEXT:    v_lshlrev_b32_e32 v3, 8, v3
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT:    v_mul_hi_u32 v6, 0xaaaaaab, v4
+; GFX11-NEXT:    v_mul_hi_u32 v7, 0xaaaaaab, v5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_mul_u32_u24_e32 v6, 24, v6
+; GFX11-NEXT:    v_mul_u32_u24_e32 v7, 24, v7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_sub_nc_u32_e32 v4, v4, v6
+; GFX11-NEXT:    v_sub_nc_u32_e32 v5, v5, v7
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_add_nc_u32_e32 v4, 8, v4
+; GFX11-NEXT:    v_add_nc_u32_e32 v5, 8, v5
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_alignbit_b32 v0, v0, v2, v4
+; GFX11-NEXT:    v_alignbit_b32 v1, v1, v3, v5
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %ret = call <2 x i24> @llvm.fshr.v2i24(<2 x i24> %src0, <2 x i24> %src1, <2 x i24> %src2)
   ret <2 x i24> %ret
 }
diff --git a/llvm/test/CodeGen/AMDGPU/or.ll b/llvm/test/CodeGen/AMDGPU/or.ll
index cc9650b9a7309..6fac3494a8ced 100644
--- a/llvm/test/CodeGen/AMDGPU/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/or.ll
@@ -18,8 +18,8 @@ define amdgpu_kernel void @or_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GFX6-NEXT:    s_mov_b32 s4, s0
 ; GFX6-NEXT:    s_mov_b32 s5, s1
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_or_b32_e32 v1, v1, v3
 ; GFX6-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX6-NEXT:    v_or_b32_e32 v1, v1, v3
 ; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GFX6-NEXT:    s_endpgm
 ;
@@ -37,8 +37,8 @@ define amdgpu_kernel void @or_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GFX8-NEXT:    s_mov_b32 s4, s0
 ; GFX8-NEXT:    s_mov_b32 s5, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT:    v_or_b32_e32 v1, v1, v3
 ; GFX8-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GFX8-NEXT:    s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/rotr.ll b/llvm/test/CodeGen/AMDGPU/rotr.ll
index d6e361d6e297e..7322e2f239ee8 100644
--- a/llvm/test/CodeGen/AMDGPU/rotr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rotr.ll
@@ -228,6 +228,134 @@ entry:
   ret void
 }
 
+define amdgpu_kernel void @rotr_v8i32(ptr addrspace(1) %in, <8 x i32> %x, <8 x i32> %y) {
+; R600-LABEL: rotr_v8i32:
+; R600:       ; %bb.0: ; %entry
+; R600-NEXT:    ALU 13, @4, KC0[CB0:0-32], KC1[]
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T2.XYZW, T3.X, 0
+; R600-NEXT:    MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
+; R600-NEXT:    CF_END
+; R600-NEXT:    ALU clause starting at 4:
+; R600-NEXT:     BIT_ALIGN_INT * T0.W, KC0[5].X, KC0[5].X, KC0[7].X,
+; R600-NEXT:     BIT_ALIGN_INT * T0.Z, KC0[4].W, KC0[4].W, KC0[6].W,
+; R600-NEXT:     BIT_ALIGN_INT * T0.Y, KC0[4].Z, KC0[4].Z, KC0[6].Z,
+; R600-NEXT:     BIT_ALIGN_INT * T0.X, KC0[4].Y, KC0[4].Y, KC0[6].Y,
+; R600-NEXT:     LSHR * T1.X, KC0[2].Y, literal.x,
+; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+; R600-NEXT:     BIT_ALIGN_INT * T2.W, KC0[6].X, KC0[6].X, KC0[8].X,
+; R600-NEXT:     BIT_ALIGN_INT * T2.Z, KC0[5].W, KC0[5].W, KC0[7].W,
+; R600-NEXT:     BIT_ALIGN_INT * T2.Y, KC0[5].Z, KC0[5].Z, KC0[7].Z,
+; R600-NEXT:     BIT_ALIGN_INT * T2.X, KC0[5].Y, KC0[5].Y, KC0[7].Y,
+; R600-NEXT:     ADD_INT * T1.W, KC0[2].Y, literal.x,
+; R600-NEXT:    16(2.242078e-44), 0(0.000000e+00)
+; R600-NEXT:     LSHR * T3.X, PV.W, literal.x,
+; R600-NEXT:    2(2.802597e-45), 0(0.000000e+00)
+;
+; SI-LABEL: rotr_v8i32:
+; SI:       ; %bb.0: ; %entry
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x11
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s19
+; SI-NEXT:    v_alignbit_b32 v3, s11, s11, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s18
+; SI-NEXT:    v_alignbit_b32 v2, s10, s10, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s17
+; SI-NEXT:    v_alignbit_b32 v1, s9, s9, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s16
+; SI-NEXT:    v_alignbit_b32 v0, s8, s8, v0
+; SI-NEXT:    v_mov_b32_e32 v4, s23
+; SI-NEXT:    v_alignbit_b32 v7, s15, s15, v4
+; SI-NEXT:    v_mov_b32_e32 v4, s22
+; SI-NEXT:    v_alignbit_b32 v6, s14, s14, v4
+; SI-NEXT:    v_mov_b32_e32 v4, s21
+; SI-NEXT:    v_alignbit_b32 v5, s13, s13, v4
+; SI-NEXT:    v_mov_b32_e32 v4, s20
+; SI-NEXT:    v_alignbit_b32 v4, s12, s12, v4
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; GFX8-LABEL: rotr_v8i32:
+; GFX8:       ; %bb.0: ; %entry
+; GFX8-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x44
+; GFX8-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX8-NEXT:    v_mov_b32_e32 v1, s18
+; GFX8-NEXT:    v_mov_b32_e32 v4, s17
+; GFX8-NEXT:    v_alignbit_b32 v2, s10, s10, v1
+; GFX8-NEXT:    v_alignbit_b32 v1, s9, s9, v4
+; GFX8-NEXT:    v_mov_b32_e32 v4, s23
+; GFX8-NEXT:    v_alignbit_b32 v7, s15, s15, v4
+; GFX8-NEXT:    v_mov_b32_e32 v4, s22
+; GFX8-NEXT:    s_add_u32 s2, s0, 16
+; GFX8-NEXT:    v_alignbit_b32 v6, s14, s14, v4
+; GFX8-NEXT:    v_mov_b32_e32 v4, s21
+; GFX8-NEXT:    s_addc_u32 s3, s1, 0
+; GFX8-NEXT:    v_alignbit_b32 v5, s13, s13, v4
+; GFX8-NEXT:    v_mov_b32_e32 v4, s20
+; GFX8-NEXT:    v_mov_b32_e32 v9, s3
+; GFX8-NEXT:    v_mov_b32_e32 v0, s19
+; GFX8-NEXT:    v_alignbit_b32 v4, s12, s12, v4
+; GFX8-NEXT:    v_mov_b32_e32 v8, s2
+; GFX8-NEXT:    v_alignbit_b32 v3, s11, s11, v0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s16
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; GFX8-NEXT:    v_alignbit_b32 v0, s8, s8, v0
+; GFX8-NEXT:    v_mov_b32_e32 v5, s1
+; GFX8-NEXT:    v_mov_b32_e32 v4, s0
+; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NEXT:    s_endpgm
+;
+; GFX10-LABEL: rotr_v8i32:
+; GFX10:       ; %bb.0: ; %entry
+; GFX10-NEXT:    s_clause 0x1
+; GFX10-NEXT:    s_load_dwordx16 s[8:23], s[4:5], 0x44
+; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX10-NEXT:    v_mov_b32_e32 v8, 0
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_alignbit_b32 v7, s15, s15, s23
+; GFX10-NEXT:    v_alignbit_b32 v6, s14, s14, s22
+; GFX10-NEXT:    v_alignbit_b32 v5, s13, s13, s21
+; GFX10-NEXT:    v_alignbit_b32 v4, s12, s12, s20
+; GFX10-NEXT:    v_alignbit_b32 v3, s11, s11, s19
+; GFX10-NEXT:    v_alignbit_b32 v2, s10, s10, s18
+; GFX10-NEXT:    v_alignbit_b32 v1, s9, s9, s17
+; GFX10-NEXT:    v_alignbit_b32 v0, s8, s8, s16
+; GFX10-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
+; GFX10-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1]
+; GFX10-NEXT:    s_endpgm
+;
+; GFX11-LABEL: rotr_v8i32:
+; GFX11:       ; %bb.0: ; %entry
+; GFX11-NEXT:    s_clause 0x1
+; GFX11-NEXT:    s_load_b512 s[8:23], s[4:5], 0x44
+; GFX11-NEXT:    s_load_b64 s[0:1], s[4:5], 0x24
+; GFX11-NEXT:    v_mov_b32_e32 v8, 0
+; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX11-NEXT:    v_alignbit_b32 v7, s15, s15, s23
+; GFX11-NEXT:    v_alignbit_b32 v6, s14, s14, s22
+; GFX11-NEXT:    v_alignbit_b32 v5, s13, s13, s21
+; GFX11-NEXT:    v_alignbit_b32 v4, s12, s12, s20
+; GFX11-NEXT:    v_alignbit_b32 v3, s11, s11, s19
+; GFX11-NEXT:    v_alignbit_b32 v2, s10, s10, s18
+; GFX11-NEXT:    v_alignbit_b32 v1, s9, s9, s17
+; GFX11-NEXT:    v_alignbit_b32 v0, s8, s8, s16
+; GFX11-NEXT:    s_clause 0x1
+; GFX11-NEXT:    global_store_b128 v8, v[4:7], s[0:1] offset:16
+; GFX11-NEXT:    global_store_b128 v8, v[0:3], s[0:1]
+; GFX11-NEXT:    s_endpgm
+entry:
+  %tmp0 = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, %y
+  %tmp1 = shl <8 x i32> %x, %tmp0
+  %tmp2 = lshr <8 x i32> %x, %y
+  %tmp3 = or <8 x i32> %tmp1, %tmp2
+  store <8 x i32> %tmp3, ptr addrspace(1) %in
+  ret void
+}
+
 declare i16 @llvm.fshr.i16(i16, i16, i16)
 
 define void @test_rotr_i16(ptr addrspace(1) nocapture readonly %sourceA, ptr addrspace(1) nocapture readonly %sourceB, ptr addrspace(1) nocapture %destValues) {
diff --git a/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll b/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll
index d496634ae474f..8af4a8de7b266 100644
--- a/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll
@@ -18,11 +18,11 @@ define <2 x i32> @test_add2x32(ptr %a_ptr, ptr %b_ptr) {
 ; CHECK-LABEL: test_add2x32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    flat_load_dword v4, v[2:3]
-; CHECK-NEXT:    flat_load_dword v5, v[0:1]
-; CHECK-NEXT:    v_mov_b32_e32 v1, 48
+; CHECK-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT:    flat_load_dwordx2 v[6:7], v[2:3]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_or_b32_e32 v0, v5, v4
+; CHECK-NEXT:    v_or_b32_e32 v1, v5, v7
+; CHECK-NEXT:    v_or_b32_e32 v0, v4, v6
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %a = load <2 x i32>, ptr %a_ptr, !range !2, !noundef !{}
   %b = load <2 x i32>, ptr %b_ptr, !range !3, !noundef !{}
diff --git a/llvm/test/CodeGen/AMDGPU/xor.ll b/llvm/test/CodeGen/AMDGPU/xor.ll
index 00bb7b24786f5..26562adc908cd 100644
--- a/llvm/test/CodeGen/AMDGPU/xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/xor.ll
@@ -21,8 +21,8 @@ define amdgpu_kernel void @xor_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in
 ; SI-NEXT:    s_mov_b32 s4, s0
 ; SI-NEXT:    s_mov_b32 s5, s1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_xor_b32_e32 v1, v3, v1
 ; SI-NEXT:    v_xor_b32_e32 v0, v2, v0
+; SI-NEXT:    v_xor_b32_e32 v1, v3, v1
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -40,8 +40,8 @@ define amdgpu_kernel void @xor_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    v_mov_b32_e32 v5, s1
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_xor_b32_e32 v1, v1, v3
 ; VI-NEXT:    v_xor_b32_e32 v0, v0, v2
+; VI-NEXT:    v_xor_b32_e32 v1, v1, v3
 ; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; VI-NEXT:    s_endpgm
   %a = load <2 x i32>, ptr addrspace(1) %in0

>From 548e5199572ca787c3b35d6a68ab9ce311189236 Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Tue, 3 Jun 2025 07:59:46 -0500
Subject: [PATCH 2/7] Modify performSelectCombine and foldFreeOpFromSelect to
 prevent the performFNEGCombine changes from being unwound.

However, this still needs work, as the changes cause a mixture
of codegen regressions and improvements in the fneg-* tests.

Update shl64_reduce.ll for v2i32 vector and.
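
As a minimal sketch of the shape this targets (illustrative only, not
taken from the tests): an fneg of a select of two bitcast i32 values,
which should lower to a single v_cndmask_b32 using source negation
rather than a separate sign-bit xor:

  define float @fneg_select_bitcast(i1 %c, i32 %x, i32 %y) {
    %fx = bitcast i32 %x to float
    %fy = bitcast i32 %y to float
    %sel = select i1 %c, float %fx, float %fy
    %neg = fneg float %sel
    ret float %neg
  }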
---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |  41 +++-
 llvm/lib/Target/AMDGPU/SIISelLowering.h       |   3 -
 llvm/lib/Target/AMDGPU/SIInstructions.td      |   9 -
 llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll | 211 ++++++++++--------
 .../CodeGen/AMDGPU/fneg-modifier-casting.ll   |  55 +++--
 llvm/test/CodeGen/AMDGPU/shl64_reduce.ll      |  20 +-
 6 files changed, 188 insertions(+), 151 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 7ed055e8da2b6..a317087f3577b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4654,8 +4654,27 @@ AMDGPUTargetLowering::foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
     if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
       return SDValue();
 
-    return distributeOpThroughSelect(DCI, LHS.getOpcode(),
-                                     SDLoc(N), Cond, LHS, RHS);
+    // select c, (fneg (f32 bitcast i32 x)), (fneg (f32 bitcast i32 y)) can be
+    // lowered directly to a V_CNDMASK_B32, so prevent the fneg from being
+    // pulled out in this case. For now the logic is as specific to this case
+    // as possible; hopefully it can be relaxed in future.
+    if (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG) {
+      SDValue LHSB = LHS.getOperand(0);
+      SDValue RHSB = RHS.getOperand(0);
+      if (LHSB.getOpcode() == ISD::BITCAST &&
+          RHSB->getOpcode() == ISD::BITCAST) {
+        EVT LHSBOpTy = LHSB->getOperand(0).getValueType();
+        EVT RHSBOpTy = RHSB->getOperand(0).getValueType();
+        if (LHSB.getValueType() == MVT::f32 &&
+            RHSB.getValueType() == MVT::f32 && LHSBOpTy == MVT::i32 &&
+            RHSBOpTy == MVT::i32) {
+          return SDValue();
+        }
+      }
+    }
+
+    return distributeOpThroughSelect(DCI, LHS.getOpcode(), SDLoc(N), Cond, LHS,
+                                     RHS);
   }
 
   bool Inv = false;
@@ -4708,8 +4727,8 @@ AMDGPUTargetLowering::foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
       if (Inv)
         std::swap(NewLHS, NewRHS);
 
-      SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
-                                      Cond, NewLHS, NewRHS);
+      SDValue NewSelect =
+          DAG.getNode(ISD::SELECT, SL, VT, Cond, NewLHS, NewRHS);
       DCI.AddToWorklist(NewSelect.getNode());
       return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
     }
@@ -5047,8 +5066,20 @@ SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
   }
   case ISD::SELECT: {
     // fneg (select c, a, b) -> select c, (fneg a), (fneg b)
+    // This combine became necessary recently to prevent a regression after
+    // v2i32 xor was made legal. A guard was added to foldFreeOpFromSelect to
+    // prevent this combine from being undone under certain conditions.
     // TODO: Invert conditions of foldFreeOpFromSelect
-    return SDValue();
+    SDValue Cond = N0.getOperand(0);
+    SDValue LHS = N0.getOperand(1);
+    SDValue RHS = N0.getOperand(2);
+    EVT LHVT = LHS.getValueType();
+    EVT RHVT = RHS.getValueType();
+
+    SDValue LFNeg = DAG.getNode(ISD::FNEG, SL, LHVT, LHS);
+    SDValue RFNeg = DAG.getNode(ISD::FNEG, SL, RHVT, RHS);
+    SDValue Op = DAG.getNode(Opc, SL, LHVT, Cond, LFNeg, RFNeg);
+    return Op;
   }
   case ISD::BITCAST: {
     SDLoc SL(N);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index a1fdae384502a..1248d6f146031 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -366,9 +366,6 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;
 
-  // bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
-  //                                           EVT VT) const override;
-
   bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                unsigned Index) const override;
   bool isExtractVecEltCheap(EVT VT, unsigned Index) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index dd053703be74e..def34eb6ba173 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2376,10 +2376,6 @@ def : AMDGPUPat <
                $src1), sub1)
 >;
 
-def : AMDGPUPat <
-  (fneg (select i1:$src0, (f32 (bitconvert i32:$src1)), (f32 (bitconvert i32:$src2)))),
-    (V_CNDMASK_B32_e64 (i32 1), $src2, (i32 1), $src1, $src0)>;
-
 let True16Predicate = NotHasTrue16BitInsts in {
 def : ROTRPattern <V_ALIGNBIT_B32_e64>;
 
@@ -2395,11 +2391,6 @@ def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))),
 
 let True16Predicate = UseRealTrue16Insts in {
 
-// Prevents regression in fneg-modifier-casting.ll along with modifications to XorCombine() when v2i32 or is legal.
-def : AMDGPUPat <
-  (fneg (select i1:$src0, (f32 (bitconvert i32:$src1)), (f32 (bitconvert i32:$src2)))),
-    (V_CNDMASK_B32_e64 (i32 1), $src2, (i32 1), $src1, $src0)>;
-
 def : GCNPat <
   (rotr i32:$src0, i32:$src1),
   (V_ALIGNBIT_B32_t16_e64 /* src0_modifiers */ 0, $src0,
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index 46da9d33639b6..13beea3782921 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -1532,10 +1532,9 @@ define float @v_fneg_inv2pi_minimum_f32(float %a) #0 {
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_min_f32_e32 v1, 0.15915494, v0
-; VI-NEXT:    v_mov_b32_e32 v2, 0x7fc00000
+; VI-NEXT:    v_mov_b32_e32 v2, 0xffc00000
 ; VI-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; VI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, v2, -v1, vcc
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0x3FC45F3060000000, float %a)
   %fneg = fneg float %min
@@ -1582,10 +1581,10 @@ define half @v_fneg_inv2pi_minimum_f16(half %a) #0 {
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_min_f16_e32 v1, 0.15915494, v0
-; VI-NEXT:    v_mov_b32_e32 v2, 0x7e00
+; VI-NEXT:    v_xor_b32_e32 v1, 0x8000, v1
+; VI-NEXT:    v_mov_b32_e32 v2, 0xfe00
 ; VI-NEXT:    v_cmp_o_f16_e32 vcc, v0, v0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %min = call half @llvm.minimum.f16(half 0xH3118, half %a)
   %fneg = fneg half %min
@@ -1690,10 +1689,10 @@ define float @v_fneg_0_minimum_foldable_use_f32_ieee(float %a, float %b) #0 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_min_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0.0, float %a)
   %fneg = fneg float %min
@@ -1717,10 +1716,10 @@ define float @v_fneg_inv2pi_minimum_foldable_use_f32(float %a, float %b) #0 {
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v0
-; VI-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
+; VI-NEXT:    v_mov_b32_e32 v3, 0xffc00000
 ; VI-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
-; VI-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; VI-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
+; VI-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0x3FC45F3060000000, float %a)
   %fneg = fneg float %min
@@ -1733,10 +1732,10 @@ define float @v_fneg_0_minimum_foldable_use_f32_no_ieee(float %a, float %b) #4 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_min_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0.0, float %a)
   %fneg = fneg float %min
@@ -1935,10 +1934,10 @@ define float @v_fneg_0_maximum_foldable_use_f32_ieee(float %a, float %b) #0 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_max_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %max = call float @llvm.maximum.f32(float 0.0, float %a)
   %fneg = fneg float %max
@@ -1951,10 +1950,10 @@ define float @v_fneg_0_maximum_foldable_use_f32_no_ieee(float %a, float %b) #4 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_max_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %max = call float @llvm.maximum.f32(float 0.0, float %a)
   %fneg = fneg float %max
@@ -3962,12 +3961,13 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f32(float %arg, i1 %
 ; SI-LABEL: s_fneg_select_infloop_regression_f32:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_bitcmp1_b32 s1, 0
-; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; SI-NEXT:    v_cndmask_b32_e64 v2, -v0, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s2
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    flat_store_dword v[0:1], v2
@@ -3976,12 +3976,13 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f32(float %arg, i1 %
 ; VI-LABEL: s_fneg_select_infloop_regression_f32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_bitcmp1_b32 s1, 0
-; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s0
 ; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; VI-NEXT:    v_cndmask_b32_e64 v2, -v0, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
 ; VI-NEXT:    flat_store_dword v[0:1], v2
@@ -3998,9 +3999,10 @@ define float @v_fneg_select_infloop_regression_f32(float %arg, i1 %arg1) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 0.0, float %arg
   %i2 = fneg float %i
@@ -4013,9 +4015,10 @@ define float @v_fneg_select_infloop_regression_f32_commute0(float %arg, i1 %arg1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, -v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 0.0
   %i2 = fneg float %i
@@ -4028,9 +4031,10 @@ define float @v_fneg_select_infloop_regression_f32_commute1(float %arg, i1 %arg1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 0.0, float %arg
   %i2 = fneg float %i
@@ -4043,9 +4047,10 @@ define float @v_fneg_select_infloop_regression_f32_commute2(float %arg, i1 %arg1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, -v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 0.0
   %i2 = fneg float %i
@@ -4060,8 +4065,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32(float %arg, i1 %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 2.0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 2.0, float %arg
   %i2 = fneg float %i
@@ -4075,8 +4080,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32_commute0(float %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 2.0
   %i2 = fneg float %i
@@ -4090,8 +4095,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32_commute1(float %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 2.0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 2.0, float %arg
   %i2 = fneg float %i
@@ -4105,8 +4110,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32_commute2(float %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 2.0
   %i2 = fneg float %i
@@ -4121,8 +4126,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32(float %arg, i1
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, -2.0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float -2.0, float %arg
   %i2 = fneg float %i
@@ -4136,8 +4141,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32_commute0(float
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v0, -2.0, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, -2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float -2.0
   %i2 = fneg float %i
@@ -4151,8 +4156,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32_commute1(float
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, -2.0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, -2.0, v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float -2.0, float %arg
   %i2 = fneg float %i
@@ -4166,8 +4171,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32_commute2(float
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, -2.0, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float -2.0
   %i2 = fneg float %i
@@ -4181,17 +4186,16 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f64(double %arg, i1
 ; SI-NEXT:    s_load_dword s6, s[4:5], 0xb
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0xd
-; SI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bitcmp1_b32 s6, 0
-; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    s_and_b64 s[6:7], s[4:5], exec
-; SI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[4:5]
+; SI-NEXT:    s_and_b32 s4, 1, s6
+; SI-NEXT:    s_xor_b32 s1, s1, 0x80000000
+; SI-NEXT:    s_cmp_eq_u32 s4, 1
+; SI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
 ; SI-NEXT:    s_cselect_b32 s0, 0, s0
+; SI-NEXT:    s_cselect_b32 s1, 0, s1
 ; SI-NEXT:    v_mov_b32_e32 v2, s2
-; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
 ; SI-NEXT:    v_mov_b32_e32 v3, s3
 ; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; SI-NEXT:    s_endpgm
@@ -4201,17 +4205,16 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f64(double %arg, i1
 ; VI-NEXT:    s_load_dword s6, s[4:5], 0x2c
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
-; VI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_bitcmp1_b32 s6, 0
-; VI-NEXT:    s_cselect_b64 s[4:5], -1, 0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_and_b64 s[6:7], s[4:5], exec
-; VI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[4:5]
+; VI-NEXT:    s_and_b32 s4, 1, s6
+; VI-NEXT:    s_xor_b32 s1, s1, 0x80000000
+; VI-NEXT:    s_cmp_eq_u32 s4, 1
+; VI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
 ; VI-NEXT:    s_cselect_b32 s0, 0, s0
+; VI-NEXT:    s_cselect_b32 s1, 0, s1
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
-; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
@@ -4227,9 +4230,10 @@ define double @v_fneg_select_infloop_regression_f64(double %arg, i1 %arg1) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 1, v2
+; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
 ; GCN-NEXT:    v_bfrev_b32_e32 v3, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v1, -v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
@@ -4244,12 +4248,14 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f16(half %arg, i1 %a
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dword s2, s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xb
+; SI-NEXT:    v_bfrev_b32_e32 v1, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
+; SI-NEXT:    s_and_b32 s3, s2, 0xffff
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -s3
 ; SI-NEXT:    s_bitcmp1_b32 s2, 16
-; SI-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
-; SI-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, s[2:3]
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_cvt_f16_f32_e32 v2, v0
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
@@ -4260,13 +4266,14 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f16(half %arg, i1 %a
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dword s2, s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; VI-NEXT:    v_mov_b32_e32 v0, 0x8000
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_bitcmp1_b32 s2, 16
-; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
-; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
-; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[2:3]
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
+; VI-NEXT:    s_xor_b32 s2, s2, 0x8000
+; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    flat_store_short v[0:1], v2
@@ -4282,21 +4289,23 @@ define half @v_fneg_select_infloop_regression_f16(half %arg, i1 %arg1) {
 ; SI-LABEL: v_fneg_select_infloop_regression_f16:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    v_cvt_f16_f32_e64 v0, -v0
 ; SI-NEXT:    v_and_b32_e32 v1, 1, v1
+; SI-NEXT:    v_bfrev_b32_e32 v2, 1
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; SI-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, vcc
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_fneg_select_infloop_regression_f16:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_and_b32_e32 v1, 1, v1
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
+; VI-NEXT:    v_mov_b32_e32 v2, 0x8000
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, half 0.0, half %arg
@@ -4311,9 +4320,9 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f16(<2 x half> %ar
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_and_b32 s1, 1, s1
-; SI-NEXT:    s_cselect_b32 s0, 0, s0
 ; SI-NEXT:    s_xor_b32 s0, s0, 0x80008000
 ; SI-NEXT:    s_cmp_eq_u32 s1, 1
+; SI-NEXT:    s_cselect_b32 s0, 0x80008000, s0
 ; SI-NEXT:    s_cselect_b32 s0, 0, s0
 ; SI-NEXT:    v_mov_b32_e32 v0, s2
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
@@ -4326,9 +4335,9 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f16(<2 x half> %ar
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_and_b32 s1, 1, s1
-; VI-NEXT:    s_cselect_b32 s0, 0, s0
 ; VI-NEXT:    s_xor_b32 s0, s0, 0x80008000
 ; VI-NEXT:    s_cmp_eq_u32 s1, 1
+; VI-NEXT:    s_cselect_b32 s0, 0x80008000, s0
 ; VI-NEXT:    s_cselect_b32 s0, 0, s0
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
@@ -4351,9 +4360,10 @@ define <2 x half> @v_fneg_select_infloop_regression_v2f16(<2 x half> %arg, i1 %a
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 1, v2
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
+; SI-NEXT:    v_mov_b32_e32 v2, 0x80008000
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, vcc
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -4364,9 +4374,10 @@ define <2 x half> @v_fneg_select_infloop_regression_v2f16(<2 x half> %arg, i1 %a
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_and_b32_e32 v1, 1, v1
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; VI-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
+; VI-NEXT:    v_mov_b32_e32 v2, 0x80008000
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, <2 x half> zeroinitializer, <2 x half> %arg
@@ -4380,17 +4391,18 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f32(<2 x float> %a
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bitcmp1_b32 s2, 0
-; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v2, -v1, v0, s[2:3]
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[2:3]
-; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[2:3]
-; SI-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[2:3]
+; SI-NEXT:    s_and_b32 s2, 1, s2
+; SI-NEXT:    s_xor_b32 s1, s1, 0x80000000
+; SI-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; SI-NEXT:    s_cmp_eq_u32 s2, 1
+; SI-NEXT:    s_cselect_b32 s0, 0x80000000, s0
+; SI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
+; SI-NEXT:    s_cselect_b32 s1, 0, s1
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
 ; SI-NEXT:    v_mov_b32_e32 v2, s4
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
 ; SI-NEXT:    v_mov_b32_e32 v3, s5
 ; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; SI-NEXT:    s_endpgm
@@ -4399,17 +4411,18 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f32(<2 x float> %a
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_bitcmp1_b32 s2, 0
-; VI-NEXT:    v_mov_b32_e32 v1, s0
-; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v2, -v1, v0, s[2:3]
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[2:3]
-; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[2:3]
-; VI-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[2:3]
+; VI-NEXT:    s_and_b32 s2, 1, s2
+; VI-NEXT:    s_xor_b32 s1, s1, 0x80000000
+; VI-NEXT:    s_xor_b32 s0, s0, 0x80000000
+; VI-NEXT:    s_cmp_eq_u32 s2, 1
+; VI-NEXT:    s_cselect_b32 s0, 0x80000000, s0
+; VI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
+; VI-NEXT:    s_cselect_b32 s1, 0, s1
+; VI-NEXT:    s_cselect_b32 s0, 0, s0
 ; VI-NEXT:    v_mov_b32_e32 v2, s4
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s5
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
@@ -4425,10 +4438,12 @@ define <2 x float> @v_fneg_select_infloop_regression_v2f32(<2 x float> %arg, i1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 1, v2
+; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
 ; GCN-NEXT:    v_bfrev_b32_e32 v3, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v1, -v1, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, v3, vcc
+; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index b6f5cad48883d..6f29d2c7418a2 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -416,19 +416,22 @@ define double @fneg_xor_select_f64(i1 %cond, double %arg0, double %arg1) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; GCN-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: fneg_xor_select_f64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; GFX11-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc_lo
+; GFX11-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select i1 %cond, double %arg0, double %arg1
   %fneg = fneg double %select
@@ -1639,19 +1642,16 @@ define amdgpu_kernel void @multiple_uses_fneg_select_f64(double %x, double %y, i
 ; GFX7-NEXT:    s_add_i32 s12, s12, s17
 ; GFX7-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    s_bitcmp1_b32 s6, 0
-; GFX7-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX7-NEXT:    s_and_b64 s[6:7], vcc, exec
-; GFX7-NEXT:    v_mov_b32_e32 v0, s3
-; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    s_and_b32 s6, 1, s6
 ; GFX7-NEXT:    s_cselect_b32 s1, s1, s3
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX7-NEXT:    s_xor_b32 s3, s1, 0x80000000
+; GFX7-NEXT:    s_cmp_eq_u32 s6, 1
 ; GFX7-NEXT:    s_cselect_b32 s0, s0, s2
-; GFX7-NEXT:    v_mov_b32_e32 v1, s1
+; GFX7-NEXT:    s_cselect_b32 s1, s3, s1
 ; GFX7-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX7-NEXT:    s_mov_b32 flat_scratch_lo, s13
-; GFX7-NEXT:    v_cndmask_b32_e64 v1, v1, -v0, vcc
 ; GFX7-NEXT:    v_mov_b32_e32 v0, s0
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX7-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX7-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; GFX7-NEXT:    s_endpgm
@@ -1663,37 +1663,32 @@ define amdgpu_kernel void @multiple_uses_fneg_select_f64(double %x, double %y, i
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[8:9], 0x18
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_bitcmp1_b32 s6, 0
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    s_and_b64 s[6:7], vcc, exec
-; GFX9-NEXT:    v_mov_b32_e32 v0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_and_b32 s6, 1, s6
 ; GFX9-NEXT:    s_cselect_b32 s1, s1, s3
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT:    s_xor_b32 s3, s1, 0x80000000
+; GFX9-NEXT:    s_cmp_eq_u32 s6, 1
 ; GFX9-NEXT:    s_cselect_b32 s0, s0, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, -v0, vcc
+; GFX9-NEXT:    s_cselect_b32 s1, s3, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: multiple_uses_fneg_select_f64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x2
-; GFX11-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
 ; GFX11-NEXT:    s_load_b32 s6, s[4:5], 0x10
+; GFX11-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
 ; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x18
-; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_mov_b32_e32 v0, s1
-; GFX11-NEXT:    s_bitcmp1_b32 s6, 0
-; GFX11-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, s3, v0, vcc_lo
-; GFX11-NEXT:    s_and_b32 s6, vcc_lo, exec_lo
+; GFX11-NEXT:    s_and_b32 s6, 1, s6
 ; GFX11-NEXT:    s_cselect_b32 s1, s1, s3
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_xor_b32 s3, s1, 0x80000000
+; GFX11-NEXT:    s_cmp_eq_u32 s6, 1
 ; GFX11-NEXT:    s_cselect_b32 s0, s0, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, s1, -v0, vcc_lo
+; GFX11-NEXT:    s_cselect_b32 s1, s3, s1
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s1
 ; GFX11-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
 ; GFX11-NEXT:    s_endpgm
@@ -1759,4 +1754,4 @@ bb5:                                              ; preds = %bb, %.entry
 
 declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32 immarg) #0
 
-attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
\ No newline at end of file
+attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 21b7ed4d6b779..a7cb97ceb4c3a 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -78,8 +78,10 @@ define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    flat_load_dwordx4 v[3:6], v[4:5]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v3, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v5, v2
+; CHECK-NEXT:    v_and_b32_e32 v4, 31, v5
+; CHECK-NEXT:    v_and_b32_e32 v1, 31, v3
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v4, v2
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -304,8 +306,10 @@ define <2 x i64> @shl_v2_or32(<2 x i64> %arg0, <2 x i64> %shift_amt) {
 ; CHECK-LABEL: shl_v2_or32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v4, v0
-; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v6, v2
+; CHECK-NEXT:    v_and_b32_e32 v3, 31, v6
+; CHECK-NEXT:    v_and_b32_e32 v1, 31, v4
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v1, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v3, v3, v2
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
@@ -367,8 +371,12 @@ define <2 x i64> @shl_v2_or32_sgpr(<2 x i64> inreg %arg0, <2 x i64> inreg %shift
 ; CHECK-LABEL: shl_v2_or32_sgpr:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    s_lshl_b32 s4, s16, s20
-; CHECK-NEXT:    s_lshl_b32 s5, s18, s22
+; CHECK-NEXT:    s_mov_b32 s4, 31
+; CHECK-NEXT:    s_mov_b32 s21, s22
+; CHECK-NEXT:    s_mov_b32 s5, s4
+; CHECK-NEXT:    s_and_b64 s[4:5], s[20:21], s[4:5]
+; CHECK-NEXT:    s_lshl_b32 s4, s16, s4
+; CHECK-NEXT:    s_lshl_b32 s5, s18, s5
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s4
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0

>From 942c79ab144c3670adac58d49471167c32580994 Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Wed, 4 Jun 2025 09:04:50 -0500
Subject: [PATCH 3/7] Regenerate test checks for fneg-combines.ll

---
 llvm/test/CodeGen/AMDGPU/fneg-combines.ll | 3791 +++++++++++++++++++++
 1 file changed, 3791 insertions(+)

diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index 14c18df6d8e41..b8af82d90d6b5 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -mcpu=hawaii -start-before=amdgpu-unify-divergent-exit-nodes -mattr=+flat-for-global < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,SI %s
 ; RUN: llc -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=hawaii -mattr=+flat-for-global -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,SI %s
 
@@ -41,6 +42,57 @@ define amdgpu_kernel void @v_fneg_add_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_add_store_use_add_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_add_store_use_add_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_f32_e32 v2, v4, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_add_store_use_add_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_f32_e32 v2, v4, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -257,6 +309,17 @@ define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i
 ; GCN-NSZ-DAG: v_cmp_nlt_f32_e32 {{.*}}, 0
 ; GCN-NSZ-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, [[C]], 0,
 define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #2 {
+; GCN-NSZ-LABEL: fneg_fadd_0_nsz:
+; GCN-NSZ:       ; %bb.0: ; %.entry
+; GCN-NSZ-NEXT:    v_rcp_f32_e32 v0, s1
+; GCN-NSZ-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NSZ-NEXT:    v_mul_f32_e32 v0, 0, v0
+; GCN-NSZ-NEXT:    v_cmp_ngt_f32_e32 vcc, s0, v0
+; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v0, -v0, v1, vcc
+; GCN-NSZ-NEXT:    v_mov_b32_e32 v1, 0x7fc00000
+; GCN-NSZ-NEXT:    v_cmp_nlt_f32_e32 vcc, 0, v0
+; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v0, v1, 0, vcc
+; GCN-NSZ-NEXT:    ; return to shader part epilog
 .entry:
   %tmp7 = fdiv afn float 1.000000e+00, %tmp6
   %tmp8 = fmul float 0.000000e+00, %tmp7
@@ -280,6 +343,51 @@ define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -301,6 +409,57 @@ define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_store_use_mul_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_store_use_mul_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -326,6 +485,57 @@ define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, p
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_multi_use_mul_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e64 v2, v4, -v2
+; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_multi_use_mul_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e64 v2, v4, -v2
+; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -347,6 +557,51 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -367,6 +622,51 @@ define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_x_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_x_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -387,6 +687,51 @@ define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_mul_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_fneg_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_f32_e64 v2, v0, -v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_fneg_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_f32_e64 v2, v0, -v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -411,6 +756,57 @@ define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr a
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -434,6 +830,61 @@ define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_load_dword s2, s[4:5], 0xf
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mul_f32_e64 v3, -v4, s2
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x3c
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mul_f32_e64 v3, -v4, s2
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -462,6 +913,55 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_max_f32_e32 v2, v3, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_max_f32_e32 v2, v3, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -481,6 +981,10 @@ define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr add
 ; GCN: v_max_f32_e64 v0, -v0, -v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %b)
   %fneg = fneg float %min
   ret float %fneg
@@ -492,6 +996,41 @@ define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_self_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, v2, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_self_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, v2, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -508,6 +1047,10 @@ define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, -v0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_self_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %a)
   %min.fneg = fneg float %min
   ret float %min.fneg
@@ -519,6 +1062,41 @@ define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_posk_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_posk_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -535,6 +1113,10 @@ define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, -4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_posk_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -4.0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float 4.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -546,6 +1128,41 @@ define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_negk_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_negk_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, 4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -562,6 +1179,10 @@ define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, 4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_negk_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, 4.0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float -4.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -574,6 +1195,41 @@ define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MIN]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_0_minnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_min_f32_e32 v2, 0, v3
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_minnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_min_f32_e32 v2, 0, v3
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -591,6 +1247,41 @@ define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg0_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg0_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, 0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -614,6 +1305,42 @@ define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, pt
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 0xbe22f983, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v2
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -636,6 +1363,41 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_max_f32_e32 v2, 0x3e22f983, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_max_f32_e32 v2, 0.15915494, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -660,6 +1422,43 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, p
 
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_ushort v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -v0
+; SI-NEXT:    v_max_f32_e32 v0, 0xbe230000, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_max_f16_e32 v2, v3, v3
+; VI-NEXT:    v_min_f16_e32 v2, 0.15915494, v2
+; VI-NEXT:    v_xor_b32_e32 v2, 0x8000, v2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -683,6 +1482,42 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_ushort v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_cvt_f32_f16_e64 v0, -v0
+; SI-NEXT:    v_max_f32_e32 v0, 0x3e230000, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_ushort v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_max_f16_e64 v2, -v3, -v3
+; VI-NEXT:    v_max_f16_e32 v2, 0.15915494, v2
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -707,6 +1542,44 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, p
 
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RESULT_LO]]:[[RESULT_HI]]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b32 s2, 0x6dc9c882
+; SI-NEXT:    s_mov_b32 s3, 0xbfc45f30
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; SI-NEXT:    v_max_f64 v[0:1], v[0:1], s[2:3]
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
+; VI-NEXT:    v_min_f64 v[0:1], v[0:1], 0.15915494309189532
+; VI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -731,6 +1604,43 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_mov_b32 s2, 0x6dc9c882
+; SI-NEXT:    s_mov_b32 s3, 0x3fc45f30
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; SI-NEXT:    v_max_f64 v[0:1], v[0:1], s[2:3]
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; VI-NEXT:    v_max_f64 v[0:1], v[0:1], 0.15915494309189532
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -747,6 +1657,10 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, p
 ; GCN: v_max_f32_e64 v0, -v0, 0{{$}}
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_neg0_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, 0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float -0.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -760,6 +1674,55 @@ define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT:    v_min_f32_e32 v2, 0, v2
+; SI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT:    v_min_f32_e32 v2, 0, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -789,6 +1752,55 @@ define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v4
+; SI-NEXT:    v_max_f32_e32 v2, 0xbe22f983, v2
+; SI-NEXT:    v_mul_f32_e32 v2, v2, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -810,6 +1822,11 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_0_minnum_foldable_use_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e32 v0, 0, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float 0.0, float %a)
   %fneg = fneg float %min
   %mul = fmul float %fneg, %b
@@ -828,6 +1845,61 @@ define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_max_f32_e32 v2, v3, v2
+; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_max_f32_e32 v2, v3, v2
+; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -850,6 +1922,11 @@ define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace
 ; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
 ; GCN-NEXT: ; return
 define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_minnum_multi_use_minnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    v_mul_f32_e32 v1, -4.0, v0
+; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %b)
   %fneg = fneg float %min
   %use1 = fmul float %min, 4.0
@@ -871,6 +1948,55 @@ define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_min_f32_e32 v2, v3, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_min_f32_e32 v2, v3, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -890,6 +2016,10 @@ define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr add
 ; GCN: v_min_f32_e64 v0, -v0, -v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %b)
   %fneg = fneg float %max
   ret float %fneg
@@ -901,6 +2031,41 @@ define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_self_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, v2, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_self_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, v2, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -917,6 +2082,10 @@ define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, -v0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_self_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %a)
   %max.fneg = fneg float %max
   ret float %max.fneg
@@ -928,6 +2097,41 @@ define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_posk_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_posk_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -944,6 +2148,10 @@ define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, -4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_posk_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -4.0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float 4.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -955,6 +2163,41 @@ define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_negk_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, 4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_negk_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, 4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -971,6 +2214,10 @@ define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, 4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_negk_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, 4.0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float -4.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -983,6 +2230,41 @@ define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MAX]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_0_maxnum_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_max_f32_e32 v2, 0, v3
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_maxnum_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_max_f32_e32 v2, 0, v3
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1000,6 +2282,41 @@ define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    v_min_f32_e32 v2, 0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    v_min_f32_e32 v2, 0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1016,6 +2333,10 @@ define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, 0{{$}}
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_neg0_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, 0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float -0.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -1029,6 +2350,55 @@ define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT:    v_max_f32_e32 v2, 0, v2
+; SI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT:    v_max_f32_e32 v2, 0, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1050,6 +2420,11 @@ define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_0_maxnum_foldable_use_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_max_f32_e32 v0, 0, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float 0.0, float %a)
   %fneg = fneg float %max
   %mul = fmul float %fneg, %b
@@ -1068,6 +2443,61 @@ define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT:    v_min_f32_e32 v2, v3, v2
+; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT:    v_min_f32_e32 v2, v3, v2
+; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1090,6 +2520,11 @@ define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace
 ; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
 ; GCN-NEXT: ; return
 define amdgpu_ps <2 x float> @v_fneg_maxnum_multi_use_maxnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_no_ieee:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v1
+; GCN-NEXT:    v_mul_f32_e32 v1, -4.0, v0
+; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %b)
   %fneg = fneg float %max
   %use1 = fmul float %max, 4.0
@@ -1139,6 +2574,65 @@ define amdgpu_kernel void @v_fneg_fma_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_fma_store_use_fma_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: v_fneg_fma_store_use_fma_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_fma_f32 v2, v6, v2, v3
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fma_store_use_fma_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_fma_f32 v2, v6, v2, v3
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1494,6 +2988,41 @@ define amdgpu_kernel void @v_fneg_fmad_multi_use_fmad_f32(ptr addrspace(1) %out,
 ; GCN: v_cvt_f64_f32_e64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e64 v[0:1], -v1
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e64 v[0:1], -v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1510,6 +3039,41 @@ define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, pt
 ; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]]
 ; GCN: {{buffer|flat}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1529,6 +3093,49 @@ define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %ou
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FNEG_A]]
 define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v4, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v4
+; SI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v4, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v4
+; VI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1549,6 +3156,51 @@ define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrsp
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[CVT_LO]]:[[CVT_HI]]]
 define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; VI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
+; VI-NEXT:    v_mov_b32_e32 v4, v0
+; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1569,6 +3221,52 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrsp
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
+; SI-NEXT:    v_mul_f64 v[0:1], v[0:1], 4.0
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v6, s1
+; VI-NEXT:    v_add_u32_e32 v5, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; VI-NEXT:    v_cvt_f64_f32_e32 v[1:2], v1
+; VI-NEXT:    v_mul_f64 v[3:4], v[1:2], 4.0
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dwordx2 v[5:6], v[1:2]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[5:6], v[3:4]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1585,6 +3283,49 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(p
 ; FIXME: Source modifiers not folded for f16->f32
 ; GCN-LABEL: {{^}}v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
 define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f16_e64 v4, -v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1599,6 +3340,51 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrsp
 
 ; GCN-LABEL: {{^}}v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
 define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT:    v_cvt_f32_f16_e64 v4, -v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, 4.0, v3
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_ushort v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f16_e32 v3, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT:    v_mul_f32_e32 v3, 4.0, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1621,6 +3407,41 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(p
 ; GCN: v_cvt_f32_f64_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f64_e64 v2, -v[1:2]
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f64_e64 v2, -v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1637,6 +3458,41 @@ define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr
 ; GCN: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1656,6 +3512,49 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[A_LO]]:[[NEG_A_HI]]]
 define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, s1
+; SI-NEXT:    v_add_i32_e32 v3, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; SI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[3:4], v5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[0:1], v[1:2]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v4, s1
+; VI-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; VI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[3:4], v5
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[1:2]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1677,6 +3576,51 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspa
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
 define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, double %c) #0 {
+; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v4, s1
+; SI-NEXT:    v_add_i32_e32 v3, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; SI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
+; SI-NEXT:    v_mul_f64 v[0:1], -v[1:2], s[4:5]
+; SI-NEXT:    flat_store_dword v[3:4], v5
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_mul_f64 v[3:4], -v[1:2], s[4:5]
+; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[3:4]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1696,6 +3640,41 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspa
 ; GCN: v_cvt_f16_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f16_f32_e64 v3, -v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f16_f32_e64 v3, -v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1712,6 +3691,41 @@ define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr
 ; GCN: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v1, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v1
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v1, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v1
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1731,6 +3745,49 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[CVT]]
 define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v3, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1750,6 +3807,49 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspa
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v2, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v2, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1770,6 +3870,51 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspa
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
 define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dword s4, s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v2, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; SI-NEXT:    v_mul_f32_e64 v2, -v2, s4
+; SI-NEXT:    flat_store_short v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v2, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v2
+; VI-NEXT:    v_mul_f32_e64 v2, -v2, s4
+; VI-NEXT:    flat_store_short v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1793,6 +3938,39 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspa
 ; GCN: v_rcp_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_rcp_f32_e64 v3, -v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_rcp_f32_e64 v3, -v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1809,6 +3987,39 @@ define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_rcp_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_rcp_f32_e32 v3, v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1828,6 +4039,47 @@ define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_store_use_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rcp_f32_e32 v4, v3
+; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_store_use_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rcp_f32_e32 v4, v3
+; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1848,6 +4100,49 @@ define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out,
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dword s4, s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rcp_f32_e32 v4, v3
+; SI-NEXT:    v_mul_f32_e64 v2, -v3, s4
+; SI-NEXT:    flat_store_dword v[0:1], v4
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dword s4, s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rcp_f32_e32 v4, v3
+; VI-NEXT:    v_mul_f32_e64 v2, -v3, s4
+; VI-NEXT:    flat_store_dword v[0:1], v4
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1872,6 +4167,51 @@ define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out,
 ; GCN: v_mul_legacy_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_legacy_f32_e64 v2, v5, -v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_legacy_f32_e64 v2, v5, -v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1895,6 +4235,57 @@ define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addr
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1919,6 +4310,57 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrsp
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_legacy_f32_e64 v2, v4, -v2
+; SI-NEXT:    v_mul_legacy_f32_e64 v3, -v2, 4.0
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_legacy_f32_e64 v2, v4, -v2
+; VI-NEXT:    v_mul_legacy_f32_e64 v3, -v2, 4.0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1940,6 +4382,51 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrsp
 ; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1960,6 +4447,51 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1980,6 +4512,51 @@ define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v1, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e64 v2, v0, -v1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v1, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e64 v2, v0, -v1
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2003,6 +4580,57 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2026,6 +4654,61 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s7
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v2
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v4, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_load_dword s2, s[4:5], 0xf
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mul_legacy_f32_e64 v3, -v4, s2
+; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s6, v2
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v4, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_load_dword s2, s[4:5], 0x3c
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mul_legacy_f32_e64 v3, -v4, s2
+; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2053,6 +4736,43 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(
 ; GCN: v_sin_f32_e32 [[RESULT:v[0-9]+]], [[FRACT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_sin_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e32 v0, 0xbe22f983, v0
+; SI-NEXT:    v_fract_f32_e32 v0, v0
+; SI-NEXT:    v_sin_f32_e32 v3, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_sin_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e32 v0, 0xbe22f983, v0
+; VI-NEXT:    v_fract_f32_e32 v0, v0
+; VI-NEXT:    v_sin_f32_e32 v3, v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2069,6 +4789,39 @@ define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN: v_sin_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_amdgcn_sin_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_sin_f32_e64 v3, -v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_amdgcn_sin_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_sin_f32_e64 v3, -v0
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2089,6 +4842,39 @@ define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_trunc_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_trunc_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_trunc_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_trunc_f32_e64 v2, -v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_trunc_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_trunc_f32_e64 v2, -v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2136,6 +4922,39 @@ define amdgpu_kernel void @v_fneg_round_f32(ptr addrspace(1) %out, ptr addrspace
 ; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rint_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rndne_f32_e64 v2, -v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_rint_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rndne_f32_e64 v2, -v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2156,6 +4975,39 @@ define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(
 ; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_nearbyint_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_rndne_f32_e64 v2, -v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_nearbyint_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_rndne_f32_e64 v2, -v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2176,6 +5028,39 @@ define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], -1.0, [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_canonicalize_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    flat_load_dword v3, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_canonicalize_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    flat_load_dword v3, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2198,6 +5083,63 @@ define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr ad
 ; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 ; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_interp_p1_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_mov_b32 m0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT:    v_interp_p1_f32 v3, v2, attr0.x
+; SI-NEXT:    v_interp_p1_f32 v2, v2, attr0.y
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_interp_p1_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_mov_b32 m0, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT:    v_interp_p1_f32_e32 v3, v2, attr0.x
+; VI-NEXT:    v_interp_p1_f32_e32 v2, v2, attr0.y
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2221,6 +5163,67 @@ define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 ; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_interp_p2_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    v_mov_b32_e32 v6, 4.0
+; SI-NEXT:    s_mov_b32 m0, 0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    flat_load_dword v5, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_mov_b32_e32 v3, 4.0
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT:    v_interp_p2_f32 v6, v2, attr0.x
+; SI-NEXT:    v_interp_p2_f32 v3, v2, attr0.y
+; SI-NEXT:    flat_store_dword v[0:1], v6
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_interp_p2_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    v_mov_b32_e32 v6, 4.0
+; VI-NEXT:    s_mov_b32 m0, 0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dword v5, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_mov_b32_e32 v3, 4.0
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT:    v_interp_p2_f32_e32 v6, v2, attr0.x
+; VI-NEXT:    v_interp_p2_f32_e32 v3, v2, attr0.y
+; VI-NEXT:    flat_store_dword v[0:1], v6
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2256,6 +5259,85 @@ define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 
 define amdgpu_kernel void @v_fneg_copytoreg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_copytoreg_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT:    s_load_dword s0, s[4:5], 0x11
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v7, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s8, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    v_mul_f32_e32 v3, v7, v3
+; SI-NEXT:    s_cbranch_scc0 .LBB105_2
+; SI-NEXT:  ; %bb.1: ; %endif
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+; SI-NEXT:  .LBB105_2: ; %if
+; SI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v3
+; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_copytoreg_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT:    s_load_dword s0, s[4:5], 0x44
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v7, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s8, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    v_mul_f32_e32 v3, v7, v3
+; VI-NEXT:    s_cbranch_scc0 .LBB105_2
+; VI-NEXT:  ; %bb.1: ; %endif
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
+; VI-NEXT:  .LBB105_2: ; %if
+; VI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v3
+; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2292,6 +5374,67 @@ endif:
 ; GCN: ; use [[MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_inlineasm_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e64 v2, v6, -v2
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ; use v2
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inlineasm_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e64 v2, v6, -v2
+; VI-NEXT:    ;;#ASMSTART
+; VI-NEXT:    ; use v2
+; VI-NEXT:    ;;#ASMEND
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2321,6 +5464,69 @@ define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: ; use [[NEG]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mul_f32_e32 v2, v6, v2
+; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT:    ;;#ASMSTART
+; SI-NEXT:    ; use v3
+; SI-NEXT:    ;;#ASMEND
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mul_f32_e32 v2, v6, v2
+; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT:    ;;#ASMSTART
+; VI-NEXT:    ; use v3
+; VI-NEXT:    ;;#ASMEND
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2356,6 +5562,65 @@ define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_2_vop3_users_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_fma_f32 v2, -v6, v2, v3
+; SI-NEXT:    v_fma_f32 v3, -v6, v3, 2.0
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multiuse_fneg_2_vop3_users_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_fma_f32 v2, -v6, v2, v3
+; VI-NEXT:    v_fma_f32 v3, -v6, v3, 2.0
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2390,6 +5655,65 @@ define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out,
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_2_vop2_users_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_mul_f32_e64 v2, -v6, v2
+; SI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multiuse_fneg_2_vop2_users_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_mul_f32_e64 v2, -v6, v2
+; VI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2423,6 +5747,65 @@ define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out,
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s5
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v6, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_fma_f32 v2, -v6, v2, 2.0
+; SI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s5
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v6, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_fma_f32 v2, -v6, v2, 2.0
+; VI-NEXT:    v_mul_f32_e64 v3, -v6, v3
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2501,6 +5884,79 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(ptr addrspace(1)
 ; GCN-NEXT: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: free_fold_src_code_size_cost_use_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_add_i32_e32 v6, vcc, s0, v6
+; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
+; SI-NEXT:    v_mov_b32_e32 v2, s8
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    v_mul_f64 v[4:5], -v[0:1], v[4:5]
+; SI-NEXT:    v_mul_f64 v[0:1], -v[0:1], v[6:7]
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: free_fold_src_code_size_cost_use_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s0, v6
+; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
+; VI-NEXT:    v_mul_f64 v[2:3], -v[0:1], v[4:5]
+; VI-NEXT:    v_mul_f64 v[0:1], -v[0:1], v[6:7]
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v5, s9
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2534,6 +5990,73 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1)
 ; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
 define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: one_use_cost_to_fold_into_src_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    flat_load_dword v8, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v7, vcc
+; SI-NEXT:    flat_load_dword v0, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_trunc_f32_e32 v0, v8
+; SI-NEXT:    v_fma_f32 v2, -v0, v2, v3
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: one_use_cost_to_fold_into_src_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    flat_load_dword v8, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v7, vcc
+; VI-NEXT:    flat_load_dword v0, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_trunc_f32_e32 v0, v8
+; VI-NEXT:    v_fma_f32 v2, -v0, v2, v3
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2564,6 +6087,79 @@ define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %o
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: multi_use_cost_to_fold_into_src:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT:    v_mov_b32_e32 v3, s13
+; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT:    v_mov_b32_e32 v7, s1
+; SI-NEXT:    v_add_i32_e32 v6, vcc, s0, v6
+; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT:    flat_load_dword v8, v[0:1] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v2, v[2:3] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v3, v[4:5] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_load_dword v4, v[6:7] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_trunc_f32_e32 v5, v8
+; SI-NEXT:    v_fma_f32 v2, -v5, v2, v3
+; SI-NEXT:    v_mul_f32_e32 v3, v5, v4
+; SI-NEXT:    flat_store_dword v[0:1], v2
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    flat_store_dword v[0:1], v3
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: multi_use_cost_to_fold_into_src:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT:    v_mov_b32_e32 v3, s13
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT:    v_mov_b32_e32 v7, s1
+; VI-NEXT:    v_add_u32_e32 v6, vcc, s0, v6
+; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NEXT:    flat_load_dword v8, v[0:1] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v2, v[2:3] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v3, v[4:5] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_load_dword v4, v[6:7] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_trunc_f32_e32 v5, v8
+; VI-NEXT:    v_fma_f32 v2, -v5, v2, v3
+; VI-NEXT:    v_mul_f32_e32 v3, v5, v4
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    flat_store_dword v[0:1], v3
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2599,6 +6195,17 @@ define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out
 ; GCN-DAG: v_mul_f32_e32 v1, [[SUB1]], v5
 ; GCN: s_setpc_b64
 define <2 x float> @fneg_fma_fneg_dagcombine_loop(<2 x float> %arg, <2 x float> %arg1, <2 x float> %arg2) #0 {
+; GCN-LABEL: fneg_fma_fneg_dagcombine_loop:
+; GCN:       ; %bb.0: ; %bb
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    s_brev_b32 s4, 1
+; GCN-NEXT:    v_fma_f32 v3, v3, -v5, s4
+; GCN-NEXT:    v_fma_f32 v2, v2, -v4, s4
+; GCN-NEXT:    v_sub_f32_e32 v1, v3, v1
+; GCN-NEXT:    v_sub_f32_e32 v0, v2, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v4
+; GCN-NEXT:    v_mul_f32_e32 v1, v1, v5
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb:
   %i3 = call fast <2 x float> @llvm.fma.v2f32(<2 x float> %arg1, <2 x float> %arg2, <2 x float> zeroinitializer)
   %i4 = fadd fast <2 x float> %i3, %arg
@@ -2612,6 +6219,11 @@ bb:
 ; GCN: s_waitcnt
 ; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
 define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
+; GCN-LABEL: nnan_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul float %x, -1.0
   %add = fmul nnan float %mul, %y
   ret float %add
@@ -2623,6 +6235,11 @@ define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN: v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT: s_setpc_b64
 define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
+; GCN-LABEL: denormal_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul nnan float %x, -1.0
   %add = fmul float %mul, %y
   ret float %add
@@ -2635,6 +6252,12 @@ define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
 ; GCN-NEXT: v_mul_f32_e32 v0, [[TMP]], v1
 ; GCN-NEXT: s_setpc_b64
 define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
+; GCN-LABEL: denorm_snan_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e64 v0, v0, -v0
+; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %canonical = fmul float %x, %x
   %mul = fmul float %canonical, -1.0
   %add = fmul float %mul, %y
@@ -2646,6 +6269,12 @@ define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
 ; GCN-NEXT: v_mul_f32_e32 [[TMP:v[0-9]+]], 1.0, v0
 ; GCN-NEXT: v_mul_f32_e64 v0, -[[TMP]], v1
 define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
+; GCN-LABEL: flush_snan_fmul_neg1_to_fneg:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %quiet = call float @llvm.canonicalize.f32(float %x)
   %mul = fmul float %quiet, -1.0
   %add = fmul float %mul, %y
@@ -2658,6 +6287,13 @@ define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN-NEXT: v_sub_f32_e32 v0, v3, v0
 ; GCN-NEXT: s_setpc_b64
 define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z) {
+; GCN-LABEL: fadd_select_fneg_fneg_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT:    v_sub_f32_e32 v0, v3, v0
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg float %x
   %neg.y  = fneg float %y
@@ -2673,6 +6309,14 @@ define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z)
 ; GCN-NEXT: v_add_f64 v[0:1], v[5:6], -v[1:2]
 ; GCN-NEXT: s_setpc_b64
 define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double %z) {
+; GCN-LABEL: fadd_select_fneg_fneg_f64:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; GCN-NEXT:    v_add_f64 v[0:1], v[5:6], -v[1:2]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg double %x
   %neg.y  = fneg double %y
@@ -2698,6 +6342,27 @@ define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double
 ; VI-NEXT: v_sub_f16_e32 v0, v3, v0
 ; VI-NEXT: s_setpc_b64
 define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
+; SI-LABEL: fadd_select_fneg_fneg_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; SI-NEXT:    v_sub_f32_e32 v0, v3, v0
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: fadd_select_fneg_fneg_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT:    v_sub_f16_e32 v0, v3, v0
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg half %x
   %neg.y = fneg half %y
@@ -2730,6 +6395,39 @@ define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
 ; VI-NEXT: v_sub_f16_e32 v0, v3, v0
 ; VI-NEXT: v_or_b32_e32 v0, v0, v1
 define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x half> %y, <2 x half> %z) {
+; SI-LABEL: fadd_select_fneg_fneg_v2f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v1, v1, v2
+; SI-NEXT:    v_cvt_f16_f32_e32 v2, v4
+; SI-NEXT:    v_cvt_f16_f32_e32 v4, v6
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT:    v_or_b32_e32 v2, v3, v2
+; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v3, v4
+; SI-NEXT:    v_cvt_f32_f16_e32 v4, v5
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_sub_f32_e32 v0, v4, v0
+; SI-NEXT:    v_sub_f32_e32 v1, v3, v1
+; SI-NEXT:    s_setpc_b64 s[30:31]
+;
+; VI-LABEL: fadd_select_fneg_fneg_v2f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT:    v_sub_f16_sdwa v1, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT:    v_sub_f16_e32 v0, v3, v0
+; VI-NEXT:    v_or_b32_e32 v0, v0, v1
+; VI-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg <2 x half> %x
   %neg.y = fneg <2 x half> %y
@@ -2746,6 +6444,12 @@ define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x ha
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT: s_setpc_b64
 define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
+; GCN-LABEL: v_fneg_select_f32:
+; GCN:       ; %bb.0:
+; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v2, -v1, vcc
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cond = icmp eq i32 %arg0, 0
   %select = select i1 %cond, float %a, float %b
   %fneg = fneg float %select
@@ -2769,6 +6473,23 @@ define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
 
 ; GCN-NEXT: s_setpc_b64
 define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
+; GCN-SAFE-LABEL: v_fneg_select_2_f32:
+; GCN-SAFE:       ; %bb.0:
+; GCN-SAFE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-SAFE-NEXT:    v_add_f32_e32 v1, 2.0, v1
+; GCN-SAFE-NEXT:    v_add_f32_e32 v2, 4.0, v2
+; GCN-SAFE-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-SAFE-NEXT:    v_cndmask_b32_e64 v0, -v2, -v1, vcc
+; GCN-SAFE-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-NSZ-LABEL: v_fneg_select_2_f32:
+; GCN-NSZ:       ; %bb.0:
+; GCN-NSZ-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NSZ-NEXT:    v_sub_f32_e32 v1, -2.0, v1
+; GCN-NSZ-NEXT:    v_sub_f32_e32 v2, -4.0, v2
+; GCN-NSZ-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NSZ-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NSZ-NEXT:    s_setpc_b64 s[30:31]
   %cond = icmp eq i32 %arg0, 0
   %add.0 = fadd float %a, 2.0
   %add.1 = fadd float %b, 4.0
@@ -2782,6 +6503,41 @@ define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
 ; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
+; SI-LABEL: v_fneg_posk_select_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v4, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s0, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, -4.0, -v4, vcc
+; SI-NEXT:    flat_store_dword v[1:2], v0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_posk_select_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v4, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, -4.0, -v4, vcc
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2799,6 +6555,41 @@ define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr add
 ; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, -4.0, v{{[0-9]+}}, vcc
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 define amdgpu_kernel void @v_fneg_negk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
+; SI-LABEL: v_fneg_negk_select_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s3
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    flat_load_dword v4, v[1:2] glc
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v2, s1
+; SI-NEXT:    v_add_i32_e32 v1, vcc, s0, v3
+; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 4.0, -v4, vcc
+; SI-NEXT:    flat_store_dword v[1:2], v0
+; SI-NEXT:    s_endpgm
+;
+; VI-LABEL: v_fneg_negk_select_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    flat_load_dword v4, v[1:2] glc
+; VI-NEXT:    s_waitcnt vmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s1
+; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v3
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 4.0, -v4, vcc
+; VI-NEXT:    flat_store_dword v[1:2], v0
+; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext

>From 9550d6a47e5356e28ea57cab9aa58964395d5510 Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Thu, 5 Jun 2025 01:32:03 -0500
Subject: [PATCH 4/7] Apply some reviewer comment tidyups

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 1 -
 llvm/lib/Target/AMDGPU/SIInstructions.td  | 1 -
 2 files changed, 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 083a62638ee09..2eb783421c87d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5942,7 +5942,6 @@ SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
 SDValue SITargetLowering::lowerROTR(SDValue Op, SelectionDAG &DAG) const {
   unsigned Opc = Op.getOpcode();
   EVT VT = Op.getValueType();
-  assert(Opc == ISD::ROTR && "Expected ROTR Opcode for lowerROTR.");
 
   assert((VT == MVT::v2i32 || VT == MVT::v4i32 || VT == MVT::v8i32 ||
           VT == MVT::v16i32) &&
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index def34eb6ba173..3d8571f35265f 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -2379,7 +2379,6 @@ def : AMDGPUPat <
 let True16Predicate = NotHasTrue16BitInsts in {
 def : ROTRPattern <V_ALIGNBIT_B32_e64>;
 
-
 def : GCNPat<(i32 (trunc (srl i64:$src0, (and i32:$src1, (i32 31))))),
           (V_ALIGNBIT_B32_e64 (i32 (EXTRACT_SUBREG (i64 $src0), sub1)),
                           (i32 (EXTRACT_SUBREG (i64 $src0), sub0)), $src1)>;

>From 8bdc32cefa81ab1351a184ba160e4b554c7dd850 Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Thu, 5 Jun 2025 10:21:12 -0500
Subject: [PATCH 5/7] Tighten up the code for performFNEGCombine to limit the
 applicable types
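
The fneg (select) combine added earlier in this series fired for any value
type; this patch restricts it to the 32-bit cases where the regression was
observed. For reference, the source pattern exercising this path has the
shape of the existing fneg_xor_select_f64 test (reduced sketch below; the
function name is illustrative and not part of the patch):

  define double @fneg_select_sketch(i1 %cond, double %a, double %b) {
    %sel = select i1 %cond, double %a, double %b
    %neg = fneg double %sel   ; pushed into the select operands by the combine
    ret double %neg
  }

After the combine, the negation is absorbed as a source modifier on the
resulting v_cndmask_b32, as the updated checks in the diff show.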

---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 10 ++--
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  1 -
 .../CodeGen/AMDGPU/fneg-modifier-casting.ll   | 53 ++++++++++---------
 3 files changed, 36 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index a317087f3577b..fb07695dda90a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5066,15 +5066,19 @@ SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
   }
   case ISD::SELECT: {
     // fneg (select c, a, b) -> select c, (fneg a), (fneg b)
-    // This combine became necessary recently to prevent a regression after v2i32 xor was made legal.
-    // When adding this combine a case was added to performFNEGCombine to prevent this combine from
-    // being undone under certain conditions.
+    // This combine became necessary recently to prevent a regression caused by
+    // this patch legalising v2i32 xor. When this combine was added, a case was
+    // also added to performFNEGCombine to prevent it from being undone under
+    // certain conditions.
     // TODO: Invert conditions of foldFreeOpFromSelect
     SDValue Cond = N0.getOperand(0);
     SDValue LHS = N0.getOperand(1);
     SDValue RHS = N0.getOperand(2);
     EVT LHVT = LHS.getValueType();
     EVT RHVT = RHS.getValueType();
+    // The regression was limited to i32 and v2i32.
+    if(RHVT != MVT::i32 && LHVT != MVT::i32)
+      return SDValue();
 
     SDValue LFNeg = DAG.getNode(ISD::FNEG, SL, LHVT, LHS);
     SDValue RFNeg = DAG.getNode(ISD::FNEG, SL, RHVT, RHS);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2eb783421c87d..a4e6d06ffaf8b 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5940,7 +5940,6 @@ SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
 // Enable lowering of ROTR for vxi32 types. This is a workaround for a
 // regression caused by legalising v2i32 or.
 SDValue SITargetLowering::lowerROTR(SDValue Op, SelectionDAG &DAG) const {
-  unsigned Opc = Op.getOpcode();
   EVT VT = Op.getValueType();
 
   assert((VT == MVT::v2i32 || VT == MVT::v4i32 || VT == MVT::v8i32 ||
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 6f29d2c7418a2..a6159a01f404b 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -416,22 +416,19 @@ define double @fneg_xor_select_f64(i1 %cond, double %arg0, double %arg1) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
-; GCN-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GCN-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: fneg_xor_select_f64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; GFX11-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
 ; GFX11-NEXT:    v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_dual_cndmask_b32 v0, v3, v1 :: v_dual_cndmask_b32 v1, v4, v2
+; GFX11-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, -v4, -v2, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %select = select i1 %cond, double %arg0, double %arg1
   %fneg = fneg double %select
@@ -1642,16 +1639,19 @@ define amdgpu_kernel void @multiple_uses_fneg_select_f64(double %x, double %y, i
 ; GFX7-NEXT:    s_add_i32 s12, s12, s17
 ; GFX7-NEXT:    s_lshr_b32 flat_scratch_hi, s12, 8
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    s_and_b32 s6, 1, s6
+; GFX7-NEXT:    s_bitcmp1_b32 s6, 0
+; GFX7-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX7-NEXT:    s_and_b64 s[6:7], vcc, exec
+; GFX7-NEXT:    v_mov_b32_e32 v0, s3
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX7-NEXT:    s_cselect_b32 s1, s1, s3
-; GFX7-NEXT:    s_xor_b32 s3, s1, 0x80000000
-; GFX7-NEXT:    s_cmp_eq_u32 s6, 1
+; GFX7-NEXT:    v_cndmask_b32_e64 v0, -v0, -v1, vcc
 ; GFX7-NEXT:    s_cselect_b32 s0, s0, s2
-; GFX7-NEXT:    s_cselect_b32 s1, s3, s1
+; GFX7-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX7-NEXT:    v_mov_b32_e32 v2, s4
 ; GFX7-NEXT:    s_mov_b32 flat_scratch_lo, s13
+; GFX7-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
 ; GFX7-NEXT:    v_mov_b32_e32 v0, s0
-; GFX7-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX7-NEXT:    v_mov_b32_e32 v3, s5
 ; GFX7-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; GFX7-NEXT:    s_endpgm
@@ -1663,32 +1663,37 @@ define amdgpu_kernel void @multiple_uses_fneg_select_f64(double %x, double %y, i
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[8:9], 0x18
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_and_b32 s6, 1, s6
+; GFX9-NEXT:    s_bitcmp1_b32 s6, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
+; GFX9-NEXT:    s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT:    v_mov_b32_e32 v0, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX9-NEXT:    s_cselect_b32 s1, s1, s3
-; GFX9-NEXT:    s_xor_b32 s3, s1, 0x80000000
-; GFX9-NEXT:    s_cmp_eq_u32 s6, 1
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, -v0, -v1, vcc
 ; GFX9-NEXT:    s_cselect_b32 s0, s0, s2
-; GFX9-NEXT:    s_cselect_b32 s1, s3, s1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: multiple_uses_fneg_select_f64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x2
-; GFX11-NEXT:    s_load_b32 s6, s[4:5], 0x10
 ; GFX11-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
+; GFX11-NEXT:    s_load_b32 s6, s[4:5], 0x10
 ; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x18
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_and_b32 s6, 1, s6
+; GFX11-NEXT:    v_mov_b32_e32 v0, s1
+; GFX11-NEXT:    s_bitcmp1_b32 s6, 0
+; GFX11-NEXT:    s_cselect_b32 vcc_lo, -1, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, -s3, -v0, vcc_lo
+; GFX11-NEXT:    s_and_b32 s6, vcc_lo, exec_lo
 ; GFX11-NEXT:    s_cselect_b32 s1, s1, s3
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_xor_b32 s3, s1, 0x80000000
-; GFX11-NEXT:    s_cmp_eq_u32 s6, 1
 ; GFX11-NEXT:    s_cselect_b32 s0, s0, s2
-; GFX11-NEXT:    s_cselect_b32 s1, s3, s1
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_cndmask_b32 v1, s1, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[4:5]
 ; GFX11-NEXT:    s_endpgm

>From 359fde691572e902db084e26dcfaa6dd78c3322e Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Fri, 6 Jun 2025 04:43:39 -0500
Subject: [PATCH 6/7] Formatting and tidying up, responding to reviewer
 comments
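
The reflowed V_ALIGNBIT_B32_t16_e64 patterns below are behaviour-preserving:
rotr selects to an alignbit with the rotated value fed to both data operands,
since rotating right is funnel-shifting a value with itself. A minimal IR
sketch of the pattern being matched (the function name is illustrative and
not part of the patch):

  define i32 @rotr_i32_sketch(i32 %x, i32 %amt) {
    ; rotr x, amt == fshr x, x, amt, hence $src0 appears twice in the GCNPat
    %r = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %amt)
    ret i32 %r
  }
  declare i32 @llvm.fshr.i32(i32, i32, i32)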

---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp |    2 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |    4 -
 llvm/lib/Target/AMDGPU/SIInstructions.td      |   44 +-
 llvm/test/CodeGen/AMDGPU/fneg-combines.ll     | 3791 -----------------
 llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll |  211 +-
 5 files changed, 117 insertions(+), 3935 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index fb07695dda90a..1ba8a78a64bc0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -5077,7 +5077,7 @@ SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
     EVT LHVT = LHS.getValueType();
     EVT RHVT = RHS.getValueType();
     // The regression was limited to i32 and v2i32.
-    if(RHVT != MVT::i32 && LHVT != MVT::i32)
+    if (RHVT != MVT::i32 && LHVT != MVT::i32)
       return SDValue();
 
     SDValue LFNeg = DAG.getNode(ISD::FNEG, SL, LHVT, LHS);
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a4e6d06ffaf8b..2d319dd4aa669 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -13014,12 +13014,8 @@ SDValue SITargetLowering::performXorCombine(SDNode *N,
           return DAG.getNode(ISD::BITCAST, DL, VT, NewSelect);
         }
     }
-    // Possibly split vector here if one side does have a constant RHS.
   }
 
-  // Add test for when only one of the RHS vector elements is a const. Might be
-  // possible to optimise this case.
-
   const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
 
   if (CRHS && VT == MVT::i64) {
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 3d8571f35265f..76cf3adddebe0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -1793,7 +1793,6 @@ def : GCNPat <
 >;
 }
 
-
 /********** ================================ **********/
 /********** Floating point absolute/negative **********/
 /********** ================================ **********/
@@ -2389,31 +2388,25 @@ def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))),
 } // end True16Predicate = NotHasTrue16BitInsts
 
 let True16Predicate = UseRealTrue16Insts in {
+  def : GCNPat<(rotr i32:$src0, i32:$src1),
+               (V_ALIGNBIT_B32_t16_e64 /* src0_modifiers */ 0, $src0,
+                   /* src1_modifiers */ 0, $src0,
+                   /* src2_modifiers */ 0, (EXTRACT_SUBREG $src1, lo16),
+                   /* clamp */ 0, /* op_sel */ 0)>;
 
-def : GCNPat <
-  (rotr i32:$src0, i32:$src1),
-  (V_ALIGNBIT_B32_t16_e64 /* src0_modifiers */ 0, $src0,
-                          /* src1_modifiers */ 0, $src0,
-                          /* src2_modifiers */ 0,
-                          (EXTRACT_SUBREG $src1, lo16),
-                          /* clamp */ 0, /* op_sel */ 0)
->;
-
-def : GCNPat<(i32 (trunc (srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))),
-          (V_ALIGNBIT_B32_t16_e64 0, /* src0_modifiers */
-                          (i32 (EXTRACT_SUBREG (i64 $src0), sub1)),
-                          0, /* src1_modifiers */
-                          (i32 (EXTRACT_SUBREG (i64 $src0), sub0)),
-                          0, /* src2_modifiers */
-                          (i16 (EXTRACT_SUBREG VGPR_32:$src1, lo16)),
-                          /* clamp */ 0, /* op_sel */ 0)>;
-
-def : GCNPat<(fshr i32:$src0, i32:$src1, i32:$src2),
-          (V_ALIGNBIT_B32_t16_e64 /* src0_modifiers */ 0, $src0,
-                          /* src1_modifiers */ 0, $src1,
-                          /* src2_modifiers */ 0,
-                          (EXTRACT_SUBREG VGPR_32:$src2, lo16),
-                          /* clamp */ 0, /* op_sel */ 0)>;
+  def : GCNPat<
+            (i32(trunc(srl i64:$src0, (i32 ShiftAmt32Imm:$src1)))),
+            (V_ALIGNBIT_B32_t16_e64 0,                     /* src0_modifiers */
+                (i32(EXTRACT_SUBREG(i64 $src0), sub1)), 0, /* src1_modifiers */
+                (i32(EXTRACT_SUBREG(i64 $src0), sub0)), 0, /* src2_modifiers */
+                (i16(EXTRACT_SUBREG VGPR_32:$src1, lo16)),
+                /* clamp */ 0, /* op_sel */ 0)>;
+
+  def : GCNPat<(fshr i32:$src0, i32:$src1, i32:$src2),
+               (V_ALIGNBIT_B32_t16_e64 /* src0_modifiers */ 0, $src0,
+                   /* src1_modifiers */ 0, $src1,
+                   /* src2_modifiers */ 0, (EXTRACT_SUBREG VGPR_32:$src2, lo16),
+                   /* clamp */ 0, /* op_sel */ 0)>;
 } // end True16Predicate = UseRealTrue16Insts
 
 let True16Predicate = UseFakeTrue16Insts in {
@@ -2451,7 +2444,6 @@ def : GCNPat<(fshr i32:$src0, i32:$src1, i32:$src2),
 >;
 } // end True16Predicate = UseFakeTrue16Insts
 
-
 /********** ====================== **********/
 /**********   Indirect addressing  **********/
 /********** ====================== **********/
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index b8af82d90d6b5..14c18df6d8e41 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -mcpu=hawaii -start-before=amdgpu-unify-divergent-exit-nodes -mattr=+flat-for-global < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,SI %s
 ; RUN: llc -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=hawaii -mattr=+flat-for-global -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,SI %s
 
@@ -42,57 +41,6 @@ define amdgpu_kernel void @v_fneg_add_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_add_store_use_add_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_add_store_use_add_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_f32_e32 v2, v4, v2
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_add_store_use_add_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_f32_e32 v2, v4, v2
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -309,17 +257,6 @@ define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i
 ; GCN-NSZ-DAG: v_cmp_nlt_f32_e32 {{.*}}, 0
 ; GCN-NSZ-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, [[C]], 0,
 define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #2 {
-; GCN-NSZ-LABEL: fneg_fadd_0_nsz:
-; GCN-NSZ:       ; %bb.0: ; %.entry
-; GCN-NSZ-NEXT:    v_rcp_f32_e32 v0, s1
-; GCN-NSZ-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-NSZ-NEXT:    v_mul_f32_e32 v0, 0, v0
-; GCN-NSZ-NEXT:    v_cmp_ngt_f32_e32 vcc, s0, v0
-; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v0, -v0, v1, vcc
-; GCN-NSZ-NEXT:    v_mov_b32_e32 v1, 0x7fc00000
-; GCN-NSZ-NEXT:    v_cmp_nlt_f32_e32 vcc, 0, v0
-; GCN-NSZ-NEXT:    v_cndmask_b32_e64 v0, v1, 0, vcc
-; GCN-NSZ-NEXT:    ; return to shader part epilog
 .entry:
   %tmp7 = fdiv afn float 1.000000e+00, %tmp6
   %tmp8 = fmul float 0.000000e+00, %tmp7
@@ -343,51 +280,6 @@ define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v5, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v5, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -409,57 +301,6 @@ define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_store_use_mul_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_store_use_mul_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -485,57 +326,6 @@ define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, p
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_multi_use_mul_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_f32_e64 v2, v4, -v2
-; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_multi_use_mul_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_f32_e64 v2, v4, -v2
-; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -557,51 +347,6 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_fneg_x_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v1, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_fneg_x_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v1, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mul_f32_e32 v2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -622,51 +367,6 @@ define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_x_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v1, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_f32_e32 v2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_x_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v1, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mul_f32_e32 v2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -687,51 +387,6 @@ define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_mul_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_fneg_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v1, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_f32_e64 v2, v0, -v1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_fneg_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v1, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mul_f32_e64 v2, v0, -v1
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -756,57 +411,6 @@ define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr a
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
-; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
-; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -830,61 +434,6 @@ define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
-; SI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0xf
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mul_f32_e64 v3, -v4, s2
-; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s7
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s6, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x3c
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mul_f32_e64 v3, -v4, s2
-; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -913,55 +462,6 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_minnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v5, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; SI-NEXT:    v_max_f32_e32 v2, v3, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_minnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v5, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; VI-NEXT:    v_max_f32_e32 v2, v3, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -981,10 +481,6 @@ define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr add
 ; GCN: v_max_f32_e64 v0, -v0, -v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
-; GCN-LABEL: v_fneg_minnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v1
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %b)
   %fneg = fneg float %min
   ret float %fneg
@@ -996,41 +492,6 @@ define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_self_minnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_max_f32_e32 v2, v2, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_self_minnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_max_f32_e32 v2, v2, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1047,10 +508,6 @@ define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, -v0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_self_minnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v0
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %a)
   %min.fneg = fneg float %min
   ret float %min.fneg
@@ -1062,41 +519,6 @@ define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_posk_minnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_max_f32_e32 v2, -4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_posk_minnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_max_f32_e32 v2, -4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1113,10 +535,6 @@ define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, -4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_posk_minnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e64 v0, -v0, -4.0
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float 4.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -1128,41 +546,6 @@ define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_negk_minnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_max_f32_e32 v2, 4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_negk_minnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_max_f32_e32 v2, 4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1179,10 +562,6 @@ define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_max_f32_e64 v0, -v0, 4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_negk_minnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e64 v0, -v0, 4.0
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float -4.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -1195,41 +574,6 @@ define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MIN]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_0_minnum_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_min_f32_e32 v2, 0, v3
-; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_0_minnum_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_min_f32_e32 v2, 0, v3
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1247,41 +591,6 @@ define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_neg0_minnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_max_f32_e32 v2, 0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_neg0_minnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_max_f32_e32 v2, 0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1305,42 +614,6 @@ define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, pt
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_inv2pi_minnum_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_max_f32_e32 v2, 0xbe22f983, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_inv2pi_minnum_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v3
-; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v2
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1363,41 +636,6 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_max_f32_e32 v2, 0x3e22f983, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_max_f32_e32 v2, 0.15915494, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1422,43 +660,6 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, p
 
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_inv2pi_minnum_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_ushort v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cvt_f32_f16_e64 v0, -v0
-; SI-NEXT:    v_max_f32_e32 v0, 0xbe230000, v0
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_store_short v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_inv2pi_minnum_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_ushort v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_max_f16_e32 v2, v3, v3
-; VI-NEXT:    v_min_f16_e32 v2, 0.15915494, v2
-; VI-NEXT:    v_xor_b32_e32 v2, 0x8000, v2
-; VI-NEXT:    flat_store_short v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1482,42 +683,6 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_ushort v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cvt_f32_f16_e64 v0, -v0
-; SI-NEXT:    v_max_f32_e32 v0, 0x3e230000, v0
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_store_short v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_ushort v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_max_f16_e64 v2, -v3, -v3
-; VI-NEXT:    v_max_f16_e32 v2, 0.15915494, v2
-; VI-NEXT:    flat_store_short v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1542,44 +707,6 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, p
 
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RESULT_LO]]:[[RESULT_HI]]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_inv2pi_minnum_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_mov_b32 s2, 0x6dc9c882
-; SI-NEXT:    s_mov_b32 s3, 0xbfc45f30
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
-; SI-NEXT:    v_max_f64 v[0:1], v[0:1], s[2:3]
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_inv2pi_minnum_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_max_f64 v[0:1], v[0:1], v[0:1]
-; VI-NEXT:    v_min_f64 v[0:1], v[0:1], 0.15915494309189532
-; VI-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1604,43 +731,6 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr a
 
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_mov_b32 s2, 0x6dc9c882
-; SI-NEXT:    s_mov_b32 s3, 0x3fc45f30
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
-; SI-NEXT:    v_max_f64 v[0:1], v[0:1], s[2:3]
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_max_f64 v[0:1], -v[0:1], -v[0:1]
-; VI-NEXT:    v_max_f64 v[0:1], v[0:1], 0.15915494309189532
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1657,10 +747,6 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, p
 ; GCN: v_max_f32_e64 v0, -v0, 0{{$}}
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_neg0_minnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e64 v0, -v0, 0
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float -0.0, float %a)
   %fneg = fneg float %min
   ret float %fneg
@@ -1674,55 +760,6 @@ define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
-; SI-NEXT:    v_min_f32_e32 v2, 0, v2
-; SI-NEXT:    v_mul_f32_e64 v2, -v2, v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
-; VI-NEXT:    v_min_f32_e32 v2, 0, v2
-; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1752,55 +789,6 @@ define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1
 
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v4
-; SI-NEXT:    v_max_f32_e32 v2, 0xbe22f983, v2
-; SI-NEXT:    v_mul_f32_e32 v2, v2, v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
-; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v2
-; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1822,11 +810,6 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
-; GCN-LABEL: v_fneg_0_minnum_foldable_use_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e32 v0, 0, v0
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float 0.0, float %a)
   %fneg = fneg float %min
   %mul = fmul float %fneg, %b
@@ -1845,61 +828,6 @@ define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; SI-NEXT:    v_max_f32_e32 v2, v3, v2
-; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; VI-NEXT:    v_max_f32_e32 v2, v3, v2
-; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1922,11 +850,6 @@ define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace
 ; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
 ; GCN-NEXT: ; return
 define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %a, float %b) #0 {
-; GCN-LABEL: v_fneg_minnum_multi_use_minnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e64 v0, -v0, -v1
-; GCN-NEXT:    v_mul_f32_e32 v1, -4.0, v0
-; GCN-NEXT:    ; return to shader part epilog
   %min = call float @llvm.minnum.f32(float %a, float %b)
   %fneg = fneg float %min
   %use1 = fmul float %min, 4.0
@@ -1948,55 +871,6 @@ define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_maxnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v5, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; SI-NEXT:    v_min_f32_e32 v2, v3, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_maxnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v5, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v5
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; VI-NEXT:    v_min_f32_e32 v2, v3, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2016,10 +890,6 @@ define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr add
 ; GCN: v_min_f32_e64 v0, -v0, -v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
-; GCN-LABEL: v_fneg_maxnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v1
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %b)
   %fneg = fneg float %max
   ret float %fneg
@@ -2031,41 +901,6 @@ define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_self_maxnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_min_f32_e32 v2, v2, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_self_maxnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_min_f32_e32 v2, v2, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2082,10 +917,6 @@ define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, -v0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_self_maxnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v0
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %a)
   %max.fneg = fneg float %max
   ret float %max.fneg
@@ -2097,41 +928,6 @@ define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_posk_maxnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_min_f32_e32 v2, -4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_posk_maxnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_min_f32_e32 v2, -4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2148,10 +944,6 @@ define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, -4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_posk_maxnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e64 v0, -v0, -4.0
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float 4.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -2163,41 +955,6 @@ define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_negk_maxnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_min_f32_e32 v2, 4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_negk_maxnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_min_f32_e32 v2, 4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2214,10 +971,6 @@ define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, 4.0
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_negk_maxnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e64 v0, -v0, 4.0
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float -4.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -2230,41 +983,6 @@ define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MAX]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_0_maxnum_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_max_f32_e32 v2, 0, v3
-; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_0_maxnum_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_max_f32_e32 v2, 0, v3
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2282,41 +1000,6 @@ define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    v_min_f32_e32 v2, 0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    v_min_f32_e32 v2, 0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2333,10 +1016,6 @@ define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, pt
 ; GCN: v_min_f32_e64 v0, -v0, 0{{$}}
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
-; GCN-LABEL: v_fneg_neg0_maxnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e64 v0, -v0, 0
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float -0.0, float %a)
   %fneg = fneg float %max
   ret float %fneg
@@ -2350,55 +1029,6 @@ define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
-; SI-NEXT:    v_max_f32_e32 v2, 0, v2
-; SI-NEXT:    v_mul_f32_e64 v2, -v2, v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, 1.0, v4
-; VI-NEXT:    v_max_f32_e32 v2, 0, v2
-; VI-NEXT:    v_mul_f32_e64 v2, -v2, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2420,11 +1050,6 @@ define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1
 ; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], v1
 ; GCN-NEXT: ; return
 define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
-; GCN-LABEL: v_fneg_0_maxnum_foldable_use_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_max_f32_e32 v0, 0, v0
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float 0.0, float %a)
   %fneg = fneg float %max
   %mul = fmul float %fneg, %b
@@ -2443,61 +1068,6 @@ define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; SI-NEXT:    v_min_f32_e32 v2, v3, v2
-; SI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_f32_e32 v3, -1.0, v4
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v2
-; VI-NEXT:    v_min_f32_e32 v2, v3, v2
-; VI-NEXT:    v_mul_f32_e32 v3, -4.0, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2520,11 +1090,6 @@ define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace
 ; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
 ; GCN-NEXT: ; return
 define amdgpu_ps <2 x float> @v_fneg_maxnum_multi_use_maxnum_f32_no_ieee(float %a, float %b) #0 {
-; GCN-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_no_ieee:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    v_min_f32_e64 v0, -v0, -v1
-; GCN-NEXT:    v_mul_f32_e32 v1, -4.0, v0
-; GCN-NEXT:    ; return to shader part epilog
   %max = call float @llvm.maxnum.f32(float %a, float %b)
   %fneg = fneg float %max
   %use1 = fmul float %max, 4.0
@@ -2574,65 +1139,6 @@ define amdgpu_kernel void @v_fneg_fma_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_fma_store_use_fma_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
-; SI-LABEL: v_fneg_fma_store_use_fma_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s7
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v6, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_fma_f32 v2, v6, v2, v3
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fma_store_use_fma_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v6, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_fma_f32 v2, v6, v2, v3
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2988,41 +1494,6 @@ define amdgpu_kernel void @v_fneg_fmad_multi_use_fmad_f32(ptr addrspace(1) %out,
 ; GCN: v_cvt_f64_f32_e64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_extend_f32_to_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_cvt_f64_f32_e64 v[0:1], -v1
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_extend_f32_to_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_cvt_f64_f32_e64 v[0:1], -v1
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3039,41 +1510,6 @@ define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, pt
 ; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]]
 ; GCN: {{buffer|flat}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3093,49 +1529,6 @@ define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %ou
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FNEG_A]]
 define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v4, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v4
-; SI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v4
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v4, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v4
-; VI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v4
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v4
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3156,51 +1549,6 @@ define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrsp
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[CVT_LO]]:[[CVT_HI]]]
 define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
-; SI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
-; VI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
-; VI-NEXT:    v_mov_b32_e32 v4, v0
-; VI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3221,52 +1569,6 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrsp
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_cvt_f64_f32_e32 v[0:1], v1
-; SI-NEXT:    v_xor_b32_e32 v5, 0x80000000, v1
-; SI-NEXT:    v_mov_b32_e32 v4, v0
-; SI-NEXT:    v_mul_f64 v[0:1], v[0:1], 4.0
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v6, s1
-; VI-NEXT:    v_add_u32_e32 v5, vcc, s0, v0
-; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cvt_f64_f32_e32 v[1:2], v1
-; VI-NEXT:    v_mul_f64 v[3:4], v[1:2], 4.0
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; VI-NEXT:    flat_store_dwordx2 v[5:6], v[1:2]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dwordx2 v[5:6], v[3:4]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3283,49 +1585,6 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(p
 ; FIXME: Source modifiers not folded for f16->f32
 ; GCN-LABEL: {{^}}v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
 define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_ushort v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f32_f16_e64 v4, -v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v4
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_ushort v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3340,51 +1599,6 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrsp
 
 ; GCN-LABEL: {{^}}v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
 define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_ushort v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; SI-NEXT:    v_cvt_f32_f16_e64 v4, -v1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, 4.0, v3
-; SI-NEXT:    flat_store_dword v[0:1], v4
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 1, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_ushort v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f32_f16_e32 v3, v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
-; VI-NEXT:    v_mul_f32_e32 v3, 4.0, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3407,41 +1621,6 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(p
 ; GCN: v_cvt_f32_f64_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_round_f64_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f32_f64_e64 v2, -v[1:2]
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_f64_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f32_f64_e64 v2, -v[1:2]
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3458,41 +1637,6 @@ define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr
 ; GCN: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3512,49 +1656,6 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[A_LO]]:[[NEG_A_HI]]]
 define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v4, s1
-; SI-NEXT:    v_add_i32_e32 v3, vcc, s0, v0
-; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; SI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
-; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[3:4], v5
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dwordx2 v[0:1], v[1:2]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v4, s1
-; VI-NEXT:    v_add_u32_e32 v3, vcc, s0, v0
-; VI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; VI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[3:4], v5
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[1:2]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3576,51 +1677,6 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspa
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
 define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, double %c) #0 {
-; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v4, s1
-; SI-NEXT:    v_add_i32_e32 v3, vcc, s0, v0
-; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; SI-NEXT:    v_cvt_f32_f64_e32 v5, v[1:2]
-; SI-NEXT:    v_mul_f64 v[0:1], -v[1:2], s[4:5]
-; SI-NEXT:    flat_store_dword v[3:4], v5
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dwordx2 v[0:1], v[0:1]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v5, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_mul_f64 v[3:4], -v[1:2], s[4:5]
-; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[3:4]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3640,41 +1696,6 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspa
 ; GCN: v_cvt_f16_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_round_f32_to_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f16_f32_e64 v3, -v1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; SI-NEXT:    flat_store_short v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_f32_to_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f16_f32_e64 v3, -v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; VI-NEXT:    flat_store_short v[0:1], v3
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3691,41 +1712,6 @@ define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr
 ; GCN: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v1, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v1
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; SI-NEXT:    flat_store_short v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v1, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f16_f32_e32 v3, v1
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; VI-NEXT:    flat_store_short v[0:1], v3
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3745,49 +1731,6 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[CVT]]
 define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v3, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 3, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dwordx2 v[1:2], v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_cvt_f32_f64_e32 v2, v[1:2]
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3807,49 +1750,6 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspa
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v2, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v2
-; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; SI-NEXT:    flat_store_short v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v2, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_cvt_f16_f32_e32 v3, v2
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v2
-; VI-NEXT:    flat_store_short v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3870,51 +1770,6 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspa
 ; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
 define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
-; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dword s4, s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v1
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v2, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v2
-; SI-NEXT:    v_mul_f32_e64 v2, -v2, s4
-; SI-NEXT:    flat_store_short v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dword s4, s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v1, 2, v0
-; VI-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v1
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v2, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_cvt_f16_f32_e32 v3, v2
-; VI-NEXT:    v_mul_f32_e64 v2, -v2, s4
-; VI-NEXT:    flat_store_short v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3938,39 +1793,6 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspa
 ; GCN: v_rcp_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_rcp_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_rcp_f32_e64 v3, -v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_rcp_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_rcp_f32_e64 v3, -v0
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -3987,39 +1809,6 @@ define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_rcp_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_rcp_f32_e32 v3, v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_rcp_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_rcp_f32_e32 v3, v0
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4039,47 +1828,6 @@ define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrsp
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_rcp_store_use_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_rcp_f32_e32 v4, v3
-; SI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
-; SI-NEXT:    flat_store_dword v[0:1], v4
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_rcp_store_use_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_rcp_f32_e32 v4, v3
-; VI-NEXT:    v_xor_b32_e32 v2, 0x80000000, v3
-; VI-NEXT:    flat_store_dword v[0:1], v4
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4100,49 +1848,6 @@ define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out,
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
-; SI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dword s4, s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_rcp_f32_e32 v4, v3
-; SI-NEXT:    v_mul_f32_e64 v2, -v3, s4
-; SI-NEXT:    flat_store_dword v[0:1], v4
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dword s4, s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_rcp_f32_e32 v4, v3
-; VI-NEXT:    v_mul_f32_e64 v2, -v3, s4
-; VI-NEXT:    flat_store_dword v[0:1], v4
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4167,51 +1872,6 @@ define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out,
 ; GCN: v_mul_legacy_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v5, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_legacy_f32_e64 v2, v5, -v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v5, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_legacy_f32_e64 v2, v5, -v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4235,57 +1895,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addr
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4310,57 +1919,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrsp
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_legacy_f32_e64 v2, v4, -v2
-; SI-NEXT:    v_mul_legacy_f32_e64 v3, -v2, 4.0
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_legacy_f32_e64 v2, v4, -v2
-; VI-NEXT:    v_mul_legacy_f32_e64 v3, -v2, 4.0
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4382,51 +1940,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrsp
 ; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v1, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v1, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4447,51 +1960,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v1, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v1, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mul_legacy_f32_e32 v2, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4512,51 +1980,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, p
 ; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
 define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v1, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mul_legacy_f32_e64 v2, v0, -v1
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v1, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mul_legacy_f32_e64 v2, v0, -v1
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4580,57 +2003,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
 define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
-; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v4
-; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4654,61 +2026,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
-; SI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s6, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v4, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_load_dword s2, s[4:5], 0xf
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mul_legacy_f32_e64 v3, -v4, s2
-; SI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s7
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s6, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v4, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_load_dword s2, s[4:5], 0x3c
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mul_legacy_f32_e64 v3, -v4, s2
-; VI-NEXT:    v_mul_legacy_f32_e32 v2, v4, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4736,43 +2053,6 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(
 ; GCN: v_sin_f32_e32 [[RESULT:v[0-9]+]], [[FRACT]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_sin_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_f32_e32 v0, 0xbe22f983, v0
-; SI-NEXT:    v_fract_f32_e32 v0, v0
-; SI-NEXT:    v_sin_f32_e32 v3, v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_sin_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_f32_e32 v0, 0xbe22f983, v0
-; VI-NEXT:    v_fract_f32_e32 v0, v0
-; VI-NEXT:    v_sin_f32_e32 v3, v0
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4789,39 +2069,6 @@ define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1
 ; GCN: v_sin_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_amdgcn_sin_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_sin_f32_e64 v3, -v0
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_amdgcn_sin_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_sin_f32_e64 v3, -v0
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4842,39 +2089,6 @@ define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addr
 ; GCN: v_trunc_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_trunc_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_trunc_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_trunc_f32_e64 v2, -v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_trunc_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_trunc_f32_e64 v2, -v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4922,39 +2136,6 @@ define amdgpu_kernel void @v_fneg_round_f32(ptr addrspace(1) %out, ptr addrspace
 ; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_rint_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_rndne_f32_e64 v2, -v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_rint_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_rndne_f32_e64 v2, -v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -4975,39 +2156,6 @@ define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(
 ; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_nearbyint_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_rndne_f32_e64 v2, -v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_nearbyint_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_rndne_f32_e64 v2, -v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5028,39 +2176,6 @@ define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], -1.0, [[A]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
 define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
-; SI-LABEL: v_fneg_canonicalize_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    flat_load_dword v3, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_canonicalize_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    flat_load_dword v3, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v2
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, -1.0, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5083,63 +2198,6 @@ define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr ad
 ; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 ; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_interp_p1_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_mov_b32 m0, 0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v5, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
-; SI-NEXT:    v_interp_p1_f32 v3, v2, attr0.x
-; SI-NEXT:    v_interp_p1_f32 v2, v2, attr0.y
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_interp_p1_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_mov_b32 m0, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v5, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
-; VI-NEXT:    v_interp_p1_f32_e32 v3, v2, attr0.x
-; VI-NEXT:    v_interp_p1_f32_e32 v2, v2, attr0.y
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5163,67 +2221,6 @@ define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 ; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
-; SI-LABEL: v_fneg_interp_p2_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    v_mov_b32_e32 v6, 4.0
-; SI-NEXT:    s_mov_b32 m0, 0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    flat_load_dword v5, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_mov_b32_e32 v3, 4.0
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e64 v2, v5, -v2
-; SI-NEXT:    v_interp_p2_f32 v6, v2, attr0.x
-; SI-NEXT:    v_interp_p2_f32 v3, v2, attr0.y
-; SI-NEXT:    flat_store_dword v[0:1], v6
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_interp_p2_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    v_mov_b32_e32 v6, 4.0
-; VI-NEXT:    s_mov_b32 m0, 0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dword v5, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_mov_b32_e32 v3, 4.0
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e64 v2, v5, -v2
-; VI-NEXT:    v_interp_p2_f32_e32 v6, v2, attr0.x
-; VI-NEXT:    v_interp_p2_f32_e32 v3, v2, attr0.y
-; VI-NEXT:    flat_store_dword v[0:1], v6
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5259,85 +2256,6 @@ define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 
 define amdgpu_kernel void @v_fneg_copytoreg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
-; SI-LABEL: v_fneg_copytoreg_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; SI-NEXT:    s_load_dword s0, s[4:5], 0x11
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s13
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s15
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v7, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s8, v6
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    s_cmp_lg_u32 s0, 0
-; SI-NEXT:    v_mul_f32_e32 v3, v7, v3
-; SI-NEXT:    s_cbranch_scc0 .LBB105_2
-; SI-NEXT:  ; %bb.1: ; %endif
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-; SI-NEXT:  .LBB105_2: ; %if
-; SI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v3
-; SI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_copytoreg_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; VI-NEXT:    s_load_dword s0, s[4:5], 0x44
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s15
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v7, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s9
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s8, v6
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    s_cmp_lg_u32 s0, 0
-; VI-NEXT:    v_mul_f32_e32 v3, v7, v3
-; VI-NEXT:    s_cbranch_scc0 .LBB105_2
-; VI-NEXT:  ; %bb.1: ; %endif
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
-; VI-NEXT:  .LBB105_2: ; %if
-; VI-NEXT:    v_xor_b32_e32 v4, 0x80000000, v3
-; VI-NEXT:    v_mul_f32_e32 v2, v4, v2
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5374,67 +2292,6 @@ endif:
 ; GCN: ; use [[MUL]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
-; SI-LABEL: v_fneg_inlineasm_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s7
-; SI-NEXT:    flat_load_dword v6, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s6, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e64 v2, v6, -v2
-; SI-NEXT:    ;;#ASMSTART
-; SI-NEXT:    ; use v2
-; SI-NEXT:    ;;#ASMEND
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_inlineasm_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    flat_load_dword v6, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e64 v2, v6, -v2
-; VI-NEXT:    ;;#ASMSTART
-; VI-NEXT:    ; use v2
-; VI-NEXT:    ;;#ASMEND
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5464,69 +2321,6 @@ define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrs
 ; GCN: ; use [[NEG]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
 define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
-; SI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s7
-; SI-NEXT:    flat_load_dword v6, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s6, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mul_f32_e32 v2, v6, v2
-; SI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; SI-NEXT:    ;;#ASMSTART
-; SI-NEXT:    ; use v3
-; SI-NEXT:    ;;#ASMEND
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    flat_load_dword v6, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s6, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mul_f32_e32 v2, v6, v2
-; VI-NEXT:    v_xor_b32_e32 v3, 0x80000000, v2
-; VI-NEXT:    ;;#ASMSTART
-; VI-NEXT:    ; use v3
-; VI-NEXT:    ;;#ASMEND
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5562,65 +2356,6 @@ define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
-; SI-LABEL: multiuse_fneg_2_vop3_users_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s7
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v6, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_fma_f32 v2, -v6, v2, v3
-; SI-NEXT:    v_fma_f32 v3, -v6, v3, 2.0
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: multiuse_fneg_2_vop3_users_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v6, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_fma_f32 v2, -v6, v2, v3
-; VI-NEXT:    v_fma_f32 v3, -v6, v3, 2.0
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5655,65 +2390,6 @@ define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out,
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
-; SI-LABEL: multiuse_fneg_2_vop2_users_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s7
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v6, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_mul_f32_e64 v2, -v6, v2
-; SI-NEXT:    v_mul_f32_e64 v3, -v6, v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: multiuse_fneg_2_vop2_users_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v6, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_mul_f32_e64 v2, -v6, v2
-; VI-NEXT:    v_mul_f32_e64 v3, -v6, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5747,65 +2423,6 @@ define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out,
 ; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
-; SI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v4
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s5
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s4, v4
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s7
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s6, v4
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v6, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_fma_f32 v2, -v6, v2, 2.0
-; SI-NEXT:    v_mul_f32_e64 v3, -v6, v3
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s2, v4
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s5
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s4, v4
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s7
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s6, v4
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v6, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    v_fma_f32 v2, -v6, v2, 2.0
-; VI-NEXT:    v_mul_f32_e64 v3, -v6, v3
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5884,79 +2501,6 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(ptr addrspace(1)
 ; GCN-NEXT: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
-; SI-LABEL: free_fold_src_code_size_cost_use_f64:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
-; SI-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s13
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s15
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v7, s1
-; SI-NEXT:    v_add_i32_e32 v6, vcc, s0, v6
-; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; SI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
-; SI-NEXT:    v_mov_b32_e32 v2, s8
-; SI-NEXT:    v_mov_b32_e32 v3, s9
-; SI-NEXT:    v_mul_f64 v[4:5], -v[0:1], v[4:5]
-; SI-NEXT:    v_mul_f64 v[0:1], -v[0:1], v[6:7]
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[4:5]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: free_fold_src_code_size_cost_use_f64:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v5, s15
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_add_u32_e32 v6, vcc, s0, v6
-; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; VI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dwordx2 v[6:7], v[6:7] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
-; VI-NEXT:    v_mul_f64 v[2:3], -v[0:1], v[4:5]
-; VI-NEXT:    v_mul_f64 v[0:1], -v[0:1], v[6:7]
-; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    v_mov_b32_e32 v5, s9
-; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -5990,73 +2534,6 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1)
 ; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
 define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
-; SI-LABEL: one_use_cost_to_fold_into_src_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
-; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s13
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s15
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    flat_load_dword v8, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v7, s1
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v6
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v7, vcc
-; SI-NEXT:    flat_load_dword v0, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_trunc_f32_e32 v0, v8
-; SI-NEXT:    v_fma_f32 v2, -v0, v2, v3
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: one_use_cost_to_fold_into_src_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s15
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    flat_load_dword v8, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s0, v6
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v7, vcc
-; VI-NEXT:    flat_load_dword v0, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_trunc_f32_e32 v0, v8
-; VI-NEXT:    v_fma_f32 v2, -v0, v2, v3
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_mov_b32_e32 v1, s9
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -6087,79 +2564,6 @@ define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %o
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
 ; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
 define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
-; SI-LABEL: multi_use_cost_to_fold_into_src:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x9
-; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x11
-; SI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s10, v6
-; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v3, s13
-; SI-NEXT:    v_add_i32_e32 v2, vcc, s12, v6
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s15
-; SI-NEXT:    v_add_i32_e32 v4, vcc, s14, v6
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; SI-NEXT:    v_mov_b32_e32 v7, s1
-; SI-NEXT:    v_add_i32_e32 v6, vcc, s0, v6
-; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; SI-NEXT:    flat_load_dword v8, v[0:1] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v2, v[2:3] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v3, v[4:5] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_load_dword v4, v[6:7] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    v_trunc_f32_e32 v5, v8
-; SI-NEXT:    v_fma_f32 v2, -v5, v2, v3
-; SI-NEXT:    v_mul_f32_e32 v3, v5, v4
-; SI-NEXT:    flat_store_dword v[0:1], v2
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    flat_store_dword v[0:1], v3
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: multi_use_cost_to_fold_into_src:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x44
-; VI-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    v_add_u32_e32 v0, vcc, s10, v6
-; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT:    v_mov_b32_e32 v3, s13
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s12, v6
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; VI-NEXT:    v_mov_b32_e32 v5, s15
-; VI-NEXT:    v_add_u32_e32 v4, vcc, s14, v6
-; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_add_u32_e32 v6, vcc, s0, v6
-; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; VI-NEXT:    flat_load_dword v8, v[0:1] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v2, v[2:3] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v3, v[4:5] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_load_dword v4, v[6:7] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_mov_b32_e32 v1, s9
-; VI-NEXT:    v_trunc_f32_e32 v5, v8
-; VI-NEXT:    v_fma_f32 v2, -v5, v2, v3
-; VI-NEXT:    v_mul_f32_e32 v3, v5, v4
-; VI-NEXT:    flat_store_dword v[0:1], v2
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    flat_store_dword v[0:1], v3
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -6195,17 +2599,6 @@ define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out
 ; GCN-DAG: v_mul_f32_e32 v1, [[SUB1]], v5
 ; GCN: s_setpc_b64
 define <2 x float> @fneg_fma_fneg_dagcombine_loop(<2 x float> %arg, <2 x float> %arg1, <2 x float> %arg2) #0 {
-; GCN-LABEL: fneg_fma_fneg_dagcombine_loop:
-; GCN:       ; %bb.0: ; %bb
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    s_brev_b32 s4, 1
-; GCN-NEXT:    v_fma_f32 v3, v3, -v5, s4
-; GCN-NEXT:    v_fma_f32 v2, v2, -v4, s4
-; GCN-NEXT:    v_sub_f32_e32 v1, v3, v1
-; GCN-NEXT:    v_sub_f32_e32 v0, v2, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v4
-; GCN-NEXT:    v_mul_f32_e32 v1, v1, v5
-; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb:
   %i3 = call fast <2 x float> @llvm.fma.v2f32(<2 x float> %arg1, <2 x float> %arg2, <2 x float> zeroinitializer)
   %i4 = fadd fast <2 x float> %i3, %arg
@@ -6219,11 +2612,6 @@ bb:
 ; GCN: s_waitcnt
 ; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
 define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
-; GCN-LABEL: nnan_fmul_neg1_to_fneg:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul float %x, -1.0
   %add = fmul nnan float %mul, %y
   ret float %add
@@ -6235,11 +2623,6 @@ define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN: v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT: s_setpc_b64
 define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
-; GCN-LABEL: denormal_fmul_neg1_to_fneg:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %mul = fmul nnan float %x, -1.0
   %add = fmul float %mul, %y
   ret float %add
@@ -6252,12 +2635,6 @@ define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
 ; GCN-NEXT: v_mul_f32_e32 v0, [[TMP]], v1
 ; GCN-NEXT: s_setpc_b64
 define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
-; GCN-LABEL: denorm_snan_fmul_neg1_to_fneg:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_mul_f32_e64 v0, v0, -v0
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %canonical = fmul float %x, %x
   %mul = fmul float %canonical, -1.0
   %add = fmul float %mul, %y
@@ -6269,12 +2646,6 @@ define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
 ; GCN-NEXT: v_mul_f32_e32 [[TMP:v[0-9]+]], 1.0, v0
 ; GCN-NEXT: v_mul_f32_e64 v0, -[[TMP]], v1
 define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
-; GCN-LABEL: flush_snan_fmul_neg1_to_fneg:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %quiet = call float @llvm.canonicalize.f32(float %x)
   %mul = fmul float %quiet, -1.0
   %add = fmul float %mul, %y
@@ -6287,13 +2658,6 @@ define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
 ; GCN-NEXT: v_sub_f32_e32 v0, v3, v0
 ; GCN-NEXT: s_setpc_b64
 define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z) {
-; GCN-LABEL: fadd_select_fneg_fneg_f32:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; GCN-NEXT:    v_sub_f32_e32 v0, v3, v0
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg float %x
   %neg.y  = fneg float %y
@@ -6309,14 +2673,6 @@ define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z)
 ; GCN-NEXT: v_add_f64 v[0:1], v[5:6], -v[1:2]
 ; GCN-NEXT: s_setpc_b64
 define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double %z) {
-; GCN-LABEL: fadd_select_fneg_fneg_f64:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
-; GCN-NEXT:    v_add_f64 v[0:1], v[5:6], -v[1:2]
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg double %x
   %neg.y  = fneg double %y
@@ -6342,27 +2698,6 @@ define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double
 ; VI-NEXT: v_sub_f16_e32 v0, v3, v0
 ; VI-NEXT: s_setpc_b64
 define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
-; SI-LABEL: fadd_select_fneg_fneg_f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v3
-; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; SI-NEXT:    v_sub_f32_e32 v0, v3, v0
-; SI-NEXT:    s_setpc_b64 s[30:31]
-;
-; VI-LABEL: fadd_select_fneg_fneg_f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; VI-NEXT:    v_sub_f16_e32 v0, v3, v0
-; VI-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg half %x
   %neg.y = fneg half %y
@@ -6395,39 +2730,6 @@ define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
 ; VI-NEXT: v_sub_f16_e32 v0, v3, v0
 ; VI-NEXT: v_or_b32_e32 v0, v0, v1
 define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x half> %y, <2 x half> %z) {
-; SI-LABEL: fadd_select_fneg_fneg_v2f16:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT:    v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT:    v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT:    v_or_b32_e32 v1, v1, v2
-; SI-NEXT:    v_cvt_f16_f32_e32 v2, v4
-; SI-NEXT:    v_cvt_f16_f32_e32 v4, v6
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT:    v_or_b32_e32 v2, v3, v2
-; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; SI-NEXT:    v_cvt_f32_f16_e32 v3, v4
-; SI-NEXT:    v_cvt_f32_f16_e32 v4, v5
-; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT:    v_sub_f32_e32 v0, v4, v0
-; SI-NEXT:    v_sub_f32_e32 v1, v3, v1
-; SI-NEXT:    s_setpc_b64 s[30:31]
-;
-; VI-LABEL: fadd_select_fneg_fneg_v2f16:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; VI-NEXT:    v_sub_f16_sdwa v1, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NEXT:    v_sub_f16_e32 v0, v3, v0
-; VI-NEXT:    v_or_b32_e32 v0, v0, v1
-; VI-NEXT:    s_setpc_b64 s[30:31]
   %cmp = icmp eq i32 %arg0, 0
   %neg.x = fneg <2 x half> %x
   %neg.y = fneg <2 x half> %y
@@ -6444,12 +2746,6 @@ define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x ha
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 ; GCN-NEXT: s_setpc_b64
 define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
-; GCN-LABEL: v_fneg_select_f32:
-; GCN:       ; %bb.0:
-; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v2, -v1, vcc
-; GCN-NEXT:    s_setpc_b64 s[30:31]
   %cond = icmp eq i32 %arg0, 0
   %select = select i1 %cond, float %a, float %b
   %fneg = fneg float %select
@@ -6473,23 +2769,6 @@ define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
 
 ; GCN-NEXT: s_setpc_b64
 define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
-; GCN-SAFE-LABEL: v_fneg_select_2_f32:
-; GCN-SAFE:       ; %bb.0:
-; GCN-SAFE-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-SAFE-NEXT:    v_add_f32_e32 v1, 2.0, v1
-; GCN-SAFE-NEXT:    v_add_f32_e32 v2, 4.0, v2
-; GCN-SAFE-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-SAFE-NEXT:    v_cndmask_b32_e64 v0, -v2, -v1, vcc
-; GCN-SAFE-NEXT:    s_setpc_b64 s[30:31]
-;
-; GCN-NSZ-LABEL: v_fneg_select_2_f32:
-; GCN-NSZ:       ; %bb.0:
-; GCN-NSZ-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NSZ-NEXT:    v_sub_f32_e32 v1, -2.0, v1
-; GCN-NSZ-NEXT:    v_sub_f32_e32 v2, -4.0, v2
-; GCN-NSZ-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NSZ-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
-; GCN-NSZ-NEXT:    s_setpc_b64 s[30:31]
   %cond = icmp eq i32 %arg0, 0
   %add.0 = fadd float %a, 2.0
   %add.1 = fadd float %b, 4.0
@@ -6503,41 +2782,6 @@ define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
 ; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
-; SI-LABEL: v_fneg_posk_select_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v3
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v4, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s1
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s0, v3
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT:    v_cndmask_b32_e64 v0, -4.0, -v4, vcc
-; SI-NEXT:    flat_store_dword v[1:2], v0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_posk_select_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v4, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s1
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v3
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e64 v0, -4.0, -v4, vcc
-; VI-NEXT:    flat_store_dword v[1:2], v0
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -6555,41 +2799,6 @@ define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr add
 ; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, -4.0, v{{[0-9]+}}, vcc
 ; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
 define amdgpu_kernel void @v_fneg_negk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
-; SI-LABEL: v_fneg_negk_select_f32:
-; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s3
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s2, v3
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    flat_load_dword v4, v[1:2] glc
-; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v2, s1
-; SI-NEXT:    v_add_i32_e32 v1, vcc, s0, v3
-; SI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; SI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT:    v_cndmask_b32_e64 v0, 4.0, -v4, vcc
-; SI-NEXT:    flat_store_dword v[1:2], v0
-; SI-NEXT:    s_endpgm
-;
-; VI-LABEL: v_fneg_negk_select_f32:
-; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_lshlrev_b32_e32 v3, 2, v0
-; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s3
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s2, v3
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    flat_load_dword v4, v[1:2] glc
-; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v2, s1
-; VI-NEXT:    v_add_u32_e32 v1, vcc, s0, v3
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e64 v0, 4.0, -v4, vcc
-; VI-NEXT:    flat_store_dword v[1:2], v0
-; VI-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
   %a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
index 13beea3782921..46da9d33639b6 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.new.ll
@@ -1532,9 +1532,10 @@ define float @v_fneg_inv2pi_minimum_f32(float %a) #0 {
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_min_f32_e32 v1, 0.15915494, v0
-; VI-NEXT:    v_mov_b32_e32 v2, 0xffc00000
+; VI-NEXT:    v_mov_b32_e32 v2, 0x7fc00000
 ; VI-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; VI-NEXT:    v_cndmask_b32_e64 v0, v2, -v1, vcc
+; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0x3FC45F3060000000, float %a)
   %fneg = fneg float %min
@@ -1581,10 +1582,10 @@ define half @v_fneg_inv2pi_minimum_f16(half %a) #0 {
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_min_f16_e32 v1, 0.15915494, v0
-; VI-NEXT:    v_xor_b32_e32 v1, 0x8000, v1
-; VI-NEXT:    v_mov_b32_e32 v2, 0xfe00
+; VI-NEXT:    v_mov_b32_e32 v2, 0x7e00
 ; VI-NEXT:    v_cmp_o_f16_e32 vcc, v0, v0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %min = call half @llvm.minimum.f16(half 0xH3118, half %a)
   %fneg = fneg half %min
@@ -1689,10 +1690,10 @@ define float @v_fneg_0_minimum_foldable_use_f32_ieee(float %a, float %b) #0 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_min_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0.0, float %a)
   %fneg = fneg float %min
@@ -1716,10 +1717,10 @@ define float @v_fneg_inv2pi_minimum_foldable_use_f32(float %a, float %b) #0 {
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_min_f32_e32 v2, 0.15915494, v0
-; VI-NEXT:    v_mov_b32_e32 v3, 0xffc00000
+; VI-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
 ; VI-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; VI-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
-; VI-NEXT:    v_mul_f32_e32 v0, v0, v1
+; VI-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; VI-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0x3FC45F3060000000, float %a)
   %fneg = fneg float %min
@@ -1732,10 +1733,10 @@ define float @v_fneg_0_minimum_foldable_use_f32_no_ieee(float %a, float %b) #4 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_min_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %min = call float @llvm.minimum.f32(float 0.0, float %a)
   %fneg = fneg float %min
@@ -1934,10 +1935,10 @@ define float @v_fneg_0_maximum_foldable_use_f32_ieee(float %a, float %b) #0 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_max_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %max = call float @llvm.maximum.f32(float 0.0, float %a)
   %fneg = fneg float %max
@@ -1950,10 +1951,10 @@ define float @v_fneg_0_maximum_foldable_use_f32_no_ieee(float %a, float %b) #4 {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_max_f32_e32 v2, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, 0xffc00000
+; GCN-NEXT:    v_mov_b32_e32 v3, 0x7fc00000
 ; GCN-NEXT:    v_cmp_o_f32_e32 vcc, v0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, -v2, vcc
-; GCN-NEXT:    v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
+; GCN-NEXT:    v_mul_f32_e64 v0, -v0, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %max = call float @llvm.maximum.f32(float 0.0, float %a)
   %fneg = fneg float %max
@@ -3961,13 +3962,12 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f32(float %arg, i1 %
 ; SI-LABEL: s_fneg_select_infloop_regression_f32:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_bitcmp1_b32 s1, 0
-; SI-NEXT:    v_mov_b32_e32 v1, s0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[0:1]
-; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v2, -v0, 0, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s2
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    flat_store_dword v[0:1], v2
@@ -3976,13 +3976,12 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f32(float %arg, i1 %
 ; VI-LABEL: s_fneg_select_infloop_regression_f32:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_bitcmp1_b32 s1, 0
-; VI-NEXT:    v_mov_b32_e32 v1, s0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[0:1]
-; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; VI-NEXT:    v_cndmask_b32_e64 v2, -v0, 0, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
 ; VI-NEXT:    flat_store_dword v[0:1], v2
@@ -3999,10 +3998,9 @@ define float @v_fneg_select_infloop_regression_f32(float %arg, i1 %arg1) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
-; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 0.0, float %arg
   %i2 = fneg float %i
@@ -4015,10 +4013,9 @@ define float @v_fneg_select_infloop_regression_f32_commute0(float %arg, i1 %arg1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
-; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, -v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 0.0
   %i2 = fneg float %i
@@ -4031,10 +4028,9 @@ define float @v_fneg_select_infloop_regression_f32_commute1(float %arg, i1 %arg1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
-; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 0.0, float %arg
   %i2 = fneg float %i
@@ -4047,10 +4043,9 @@ define float @v_fneg_select_infloop_regression_f32_commute2(float %arg, i1 %arg1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
-; GCN-NEXT:    v_bfrev_b32_e32 v2, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, -v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 0.0
   %i2 = fneg float %i
@@ -4065,8 +4060,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32(float %arg, i1 %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 2.0, float %arg
   %i2 = fneg float %i
@@ -4080,8 +4075,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32_commute0(float %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 2.0
   %i2 = fneg float %i
@@ -4095,8 +4090,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32_commute1(float %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float 2.0, float %arg
   %i2 = fneg float %i
@@ -4110,8 +4105,8 @@ define float @v_fneg_select_infloop_regression_inline_imm_f32_commute2(float %ar
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 2.0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float 2.0
   %i2 = fneg float %i
@@ -4126,8 +4121,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32(float %arg, i1
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, -2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float -2.0, float %arg
   %i2 = fneg float %i
@@ -4141,8 +4136,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32_commute0(float
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, -2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, -2.0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, -2.0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float -2.0
   %i2 = fneg float %i
@@ -4156,8 +4151,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32_commute1(float
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, 2.0, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, -2.0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, -2.0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float -2.0, float %arg
   %i2 = fneg float %i
@@ -4171,8 +4166,8 @@ define float @v_fneg_select_infloop_regression_neg_inline_imm_f32_commute2(float
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v1, 1, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 2.0, -v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, -2.0, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -2.0, -v0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, float %arg, float -2.0
   %i2 = fneg float %i
@@ -4186,16 +4181,17 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f64(double %arg, i1
 ; SI-NEXT:    s_load_dword s6, s[4:5], 0xb
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0xd
+; SI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_and_b32 s4, 1, s6
-; SI-NEXT:    s_xor_b32 s1, s1, 0x80000000
-; SI-NEXT:    s_cmp_eq_u32 s4, 1
-; SI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
+; SI-NEXT:    s_bitcmp1_b32 s6, 0
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_and_b64 s[6:7], s[4:5], exec
+; SI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[4:5]
 ; SI-NEXT:    s_cselect_b32 s0, 0, s0
-; SI-NEXT:    s_cselect_b32 s1, 0, s1
 ; SI-NEXT:    v_mov_b32_e32 v2, s2
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
 ; SI-NEXT:    v_mov_b32_e32 v3, s3
 ; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; SI-NEXT:    s_endpgm
@@ -4205,16 +4201,17 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f64(double %arg, i1
 ; VI-NEXT:    s_load_dword s6, s[4:5], 0x2c
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
+; VI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_and_b32 s4, 1, s6
-; VI-NEXT:    s_xor_b32 s1, s1, 0x80000000
-; VI-NEXT:    s_cmp_eq_u32 s4, 1
-; VI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
+; VI-NEXT:    s_bitcmp1_b32 s6, 0
+; VI-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_and_b64 s[6:7], s[4:5], exec
+; VI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[4:5]
 ; VI-NEXT:    s_cselect_b32 s0, 0, s0
-; VI-NEXT:    s_cselect_b32 s1, 0, s1
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
@@ -4230,10 +4227,9 @@ define double @v_fneg_select_infloop_regression_f64(double %arg, i1 %arg1) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 1, v2
-; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
 ; GCN-NEXT:    v_bfrev_b32_e32 v3, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, -v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
@@ -4248,14 +4244,12 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f16(half %arg, i1 %a
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dword s2, s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xb
-; SI-NEXT:    v_bfrev_b32_e32 v1, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_and_b32 s3, s2, 0xffff
-; SI-NEXT:    v_cvt_f32_f16_e64 v0, -s3
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, s2
 ; SI-NEXT:    s_bitcmp1_b32 s2, 16
-; SI-NEXT:    s_cselect_b64 vcc, -1, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; SI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, s[2:3]
 ; SI-NEXT:    v_cvt_f16_f32_e32 v2, v0
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
@@ -4266,14 +4260,13 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_f16(half %arg, i1 %a
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dword s2, s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x2c
-; VI-NEXT:    v_mov_b32_e32 v0, 0x8000
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_bitcmp1_b32 s2, 16
-; VI-NEXT:    s_cselect_b64 vcc, -1, 0
-; VI-NEXT:    s_xor_b32 s2, s2, 0x8000
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
+; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
+; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[2:3]
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    flat_store_short v[0:1], v2
@@ -4289,23 +4282,21 @@ define half @v_fneg_select_infloop_regression_f16(half %arg, i1 %arg1) {
 ; SI-LABEL: v_fneg_select_infloop_regression_f16:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_cvt_f16_f32_e64 v0, -v0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; SI-NEXT:    v_and_b32_e32 v1, 1, v1
-; SI-NEXT:    v_bfrev_b32_e32 v2, 1
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v0, -v0, 0, vcc
 ; SI-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; VI-LABEL: v_fneg_select_infloop_regression_f16:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_and_b32_e32 v1, 1, v1
-; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
-; VI-NEXT:    v_mov_b32_e32 v2, 0x8000
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, 0x8000, v0
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, half 0.0, half %arg
@@ -4320,9 +4311,9 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f16(<2 x half> %ar
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_and_b32 s1, 1, s1
+; SI-NEXT:    s_cselect_b32 s0, 0, s0
 ; SI-NEXT:    s_xor_b32 s0, s0, 0x80008000
 ; SI-NEXT:    s_cmp_eq_u32 s1, 1
-; SI-NEXT:    s_cselect_b32 s0, 0x80008000, s0
 ; SI-NEXT:    s_cselect_b32 s0, 0, s0
 ; SI-NEXT:    v_mov_b32_e32 v0, s2
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
@@ -4335,9 +4326,9 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f16(<2 x half> %ar
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_and_b32 s1, 1, s1
+; VI-NEXT:    s_cselect_b32 s0, 0, s0
 ; VI-NEXT:    s_xor_b32 s0, s0, 0x80008000
 ; VI-NEXT:    s_cmp_eq_u32 s1, 1
-; VI-NEXT:    s_cselect_b32 s0, 0x80008000, s0
 ; VI-NEXT:    s_cselect_b32 s0, 0, s0
 ; VI-NEXT:    v_mov_b32_e32 v0, s2
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
@@ -4360,10 +4351,9 @@ define <2 x half> @v_fneg_select_infloop_regression_v2f16(<2 x half> %arg, i1 %a
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; SI-NEXT:    v_or_b32_e32 v0, v0, v1
 ; SI-NEXT:    v_and_b32_e32 v1, 1, v2
-; SI-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; SI-NEXT:    v_mov_b32_e32 v2, 0x80008000
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; SI-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, vcc
 ; SI-NEXT:    v_cvt_f32_f16_e32 v0, v1
 ; SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
@@ -4374,10 +4364,9 @@ define <2 x half> @v_fneg_select_infloop_regression_v2f16(<2 x half> %arg, i1 %a
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; VI-NEXT:    v_and_b32_e32 v1, 1, v1
-; VI-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
-; VI-NEXT:    v_mov_b32_e32 v2, 0x80008000
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; VI-NEXT:    v_xor_b32_e32 v0, 0x80008000, v0
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; VI-NEXT:    s_setpc_b64 s[30:31]
   %i = select i1 %arg1, <2 x half> zeroinitializer, <2 x half> %arg
@@ -4391,18 +4380,17 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f32(<2 x float> %a
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_and_b32 s2, 1, s2
-; SI-NEXT:    s_xor_b32 s1, s1, 0x80000000
-; SI-NEXT:    s_xor_b32 s0, s0, 0x80000000
-; SI-NEXT:    s_cmp_eq_u32 s2, 1
-; SI-NEXT:    s_cselect_b32 s0, 0x80000000, s0
-; SI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
-; SI-NEXT:    s_cselect_b32 s1, 0, s1
-; SI-NEXT:    s_cselect_b32 s0, 0, s0
-; SI-NEXT:    v_mov_b32_e32 v2, s4
-; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    s_bitcmp1_b32 s2, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s0
+; SI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v2, -v1, v0, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[2:3]
+; SI-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v2, s4
 ; SI-NEXT:    v_mov_b32_e32 v3, s5
 ; SI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; SI-NEXT:    s_endpgm
@@ -4411,18 +4399,17 @@ define amdgpu_kernel void @s_fneg_select_infloop_regression_v2f32(<2 x float> %a
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT:    v_bfrev_b32_e32 v0, 1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_and_b32 s2, 1, s2
-; VI-NEXT:    s_xor_b32 s1, s1, 0x80000000
-; VI-NEXT:    s_xor_b32 s0, s0, 0x80000000
-; VI-NEXT:    s_cmp_eq_u32 s2, 1
-; VI-NEXT:    s_cselect_b32 s0, 0x80000000, s0
-; VI-NEXT:    s_cselect_b32 s1, 0x80000000, s1
-; VI-NEXT:    s_cselect_b32 s1, 0, s1
-; VI-NEXT:    s_cselect_b32 s0, 0, s0
-; VI-NEXT:    v_mov_b32_e32 v2, s4
-; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    s_bitcmp1_b32 s2, 0
+; VI-NEXT:    v_mov_b32_e32 v1, s0
+; VI-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, -v1, v0, s[2:3]
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    v_cndmask_b32_e64 v0, -v1, v0, s[2:3]
+; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[2:3]
+; VI-NEXT:    v_cndmask_b32_e64 v0, v2, 0, s[2:3]
+; VI-NEXT:    v_mov_b32_e32 v2, s4
 ; VI-NEXT:    v_mov_b32_e32 v3, s5
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
@@ -4438,12 +4425,10 @@ define <2 x float> @v_fneg_select_infloop_regression_v2f32(<2 x float> %arg, i1
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_and_b32_e32 v2, 1, v2
-; GCN-NEXT:    v_xor_b32_e32 v1, 0x80000000, v1
 ; GCN-NEXT:    v_bfrev_b32_e32 v3, 1
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GCN-NEXT:    v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, -v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, -v0, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]

>From e29429e0706f76b7e2b3db0af7ca2f862b9dc132 Mon Sep 17 00:00:00 2001
From: Chris Jackson <chris.jackson at amd.com>
Date: Fri, 6 Jun 2025 07:47:44 -0500
Subject: [PATCH 7/7] Respond to reviewer comments - add maybe_unused
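
VT in lowerROTR() is now read only by the assert, and assert() expands
to nothing under NDEBUG, so a release build would otherwise warn that
VT is unused; [[maybe_unused]] suppresses that. A minimal standalone
illustration of the pattern (hypothetical names, not code from this
patch):

    #include <cassert>

    int lower(int Op) {
      // Read only by the assert below; without the attribute, an
      // NDEBUG build would emit -Wunused-variable for VT.
      [[maybe_unused]] int VT = Op & 0xff;
      assert(VT != 0 && "Unexpected ValueType.");
      return Op;
    }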

---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2d319dd4aa669..3a879022ccfb9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5940,7 +5940,7 @@ SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
 // Enable lowering of ROTR for vxi32 types. This is a workaround for a
 // regression caused by legalising v2i32 or.
 SDValue SITargetLowering::lowerROTR(SDValue Op, SelectionDAG &DAG) const {
-  EVT VT = Op.getValueType();
+  [[maybe_unused]] EVT VT = Op.getValueType();
 
   assert((VT == MVT::v2i32 || VT == MVT::v4i32 || VT == MVT::v8i32 ||
           VT == MVT::v16i32) &&
@@ -12997,7 +12997,7 @@ SDValue SITargetLowering::performXorCombine(SDNode *N,
     SDValue LHS_0 = LHS.getOperand(0);
     SDValue LHS_1 = LHS.getOperand(1);
 
-    if (LHS.getOpcode() == ISD::VSELECT && VT == MVT::v2i32) {
+    if (LHS.getOpcode() == ISD::VSELECT) {
       if (CRHS_0 && CRHS_0->getAPIntValue().isSignMask() &&
           shouldFoldFNegIntoSrc(N, LHS_0))
         if (CRHS_1 && CRHS_1->getAPIntValue().isSignMask() &&

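For reference: the second hunk drops the VT == MVT::v2i32 guard, so the
sign-mask xor fold in performXorCombine() now applies to VSELECT
generally, not just v2i32. A rough IR-level sketch of the shape being
matched (illustrative names and types of my own, not taken from the
tests):

    %sel = select <2 x i1> %c, <2 x i32> %x, <2 x i32> %y
    %neg = xor <2 x i32> %sel, <i32 -2147483648, i32 -2147483648>
    ; xor by the sign mask is an integer fneg of the float bits; when
    ; shouldFoldFNegIntoSrc() allows it, the negation is pushed into
    ; the select operands instead (visible above as -v source
    ; modifiers on v_cndmask_b32).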

